diff --git a/.gitattributes b/.gitattributes index 1ef325f1b111266a6b26e0196871bd78baa8c2f3..5eedfe920df869e11808e94670b403a24aaebd0c 100644 --- a/.gitattributes +++ b/.gitattributes @@ -57,3 +57,185 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text # Video files - compressed *.mp4 filter=lfs diff=lfs merge=lfs -text *.webm filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/_cffi_backend.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pytrec_eval_ext.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/setuptools/cli-arm64.exe filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pkg_resources/__pycache__/__init__.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pkg_resources/_vendor/__pycache__/pyparsing.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/setuptools/gui-arm64.exe filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/setuptools/_vendor/more_itertools/__pycache__/more.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/setuptools/_vendor/__pycache__/pyparsing.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/regex/__pycache__/_regex_core.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/regex/_regex.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/regex/__pycache__/test_regex.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pyarrow/_acero.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pyarrow/_azurefs.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pyarrow/_compute.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pyarrow/_csv.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pyarrow/_dataset.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pyarrow/_dataset_parquet.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pyarrow/_dataset_parquet_encryption.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pyarrow/_feather.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pyarrow/_flight.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pyarrow/_fs.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pyarrow/_gcsfs.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pyarrow/_hdfs.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pyarrow/_json.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pyarrow/_orc.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pyarrow/_parquet.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text 
+venv/lib/python3.10/site-packages/pyarrow/_parquet_encryption.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pyarrow/_s3fs.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pyarrow/_substrait.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pyarrow/lib.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pyarrow/libarrow_acero.so.2100 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pyarrow/libarrow_dataset.so.2100 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pyarrow/libarrow_python.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pyarrow/libarrow_python.so.2100 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pyarrow/libarrow_python_flight.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pyarrow/libarrow_python_flight.so.2100 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pyarrow/libarrow_python.so.2100.0.0 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pyarrow/libarrow_python_flight.so.2100.0.0 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pyarrow/libarrow_compute.so.2100 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pyarrow/libarrow_flight.so.2100 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pyarrow/libarrow_substrait.so.2100 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pyarrow/libparquet.so.2100 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pyarrow/libarrow.so.2100 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_array.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_compute.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_dataset.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_pandas.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/blake3/blake3.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/hf_xet/hf_xet.abi3.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/chardet/__pycache__/johabfreq.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/scipy.libs/libquadmath-96973f99.so.0.0.0 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/scipy.libs/libgfortran-040039e1-0352e75f.so.5.0.0 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/scipy.libs/libgfortran-040039e1.so.5.0.0 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/scipy.libs/libquadmath-96973f99-934c22de.so.0.0.0 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/scipy.libs/libscipy_openblas-68440149.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/framenet.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__pycache__/_async_client.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text 
+venv/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_client.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/hf_api.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/aiohttp/_http_parser.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/aiohttp/_http_writer.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/aiohttp/_websocket/mask.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/aiohttp/_websocket/reader_c.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/accelerate/__pycache__/accelerator.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/dataclasses.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/msgspec/_core.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/rich/__pycache__/_emoji_codes.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pyparsing/__pycache__/core.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/jiter/jiter.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/tree_sitter_python/_binding.abi3.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/uvloop/loop.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/numpy.libs/libquadmath-96973f99-934c22de.so.0.0.0 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/numpy.libs/libgfortran-040039e1-0352e75f.so.5.0.0 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/numpy.libs/libscipy_openblas64_-56d6093b.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/torchvision.libs/libz.d13a2644.so.1 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/torchvision.libs/libcudart.41118559.so.12 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/torchvision.libs/libwebp.54a0d02a.so.7 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/torchvision.libs/libjpeg.1c1c4b09.so.8 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/torchvision/image.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/torchvision.libs/libpng16.0364a1db.so.16 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/torchvision.libs/libnvjpeg.02b6d700.so.12 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/torchvision/_C.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/ray/_raylet.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/ray/_private/runtime_env/agent/thirdparty_files/aiohttp/_http_parser.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/ray/_private/runtime_env/agent/thirdparty_files/aiohttp/_http_writer.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/ray/_private/runtime_env/agent/thirdparty_files/aiohttp/_websocket/mask.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text 
+venv/lib/python3.10/site-packages/ray/_private/runtime_env/agent/thirdparty_files/aiohttp/_websocket/reader_c.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/ray/_private/runtime_env/agent/thirdparty_files/frozenlist/_frozenlist.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/ray/_private/runtime_env/agent/thirdparty_files/multidict/_multidict.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/ray/_private/runtime_env/agent/thirdparty_files/idna/__pycache__/idnadata.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/ray/_private/runtime_env/agent/thirdparty_files/propcache/_helpers_c.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/ray/_private/runtime_env/agent/thirdparty_files/idna/__pycache__/uts46data.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/ray/_private/runtime_env/agent/thirdparty_files/yarl/_quoting_c.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/ray/_private/runtime_env/agent/thirdparty_files/__pycache__/typing_extensions.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/ray/_private/__pycache__/worker.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/ray/core/libjemalloc.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/ray/core/src/ray/gcs/gcs_server filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/ray/core/src/ray/raylet/raylet filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/ray/data/__pycache__/dataset.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/ray/data/__pycache__/read_api.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/ray/jars/ray_dist.jar filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/ray/rllib/algorithms/__pycache__/algorithm.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/ray/rllib/algorithms/__pycache__/algorithm_config.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/ray/thirdparty_files/psutil/_psutil_linux.abi3.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/mistral_common/data/mistral_instruct_tokenizer_241114.model.v7 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/mistral_common/data/mistral_instruct_tokenizer_241114.model.v7m1 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/mistral_common/data/tokenizer.model.v1 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/mistral_common/data/tekken_240718.json filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/mistral_common/data/tekken_240911.json filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/vllm/_flashmla_C.abi3.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/vllm/_moe_C.abi3.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/vllm/_C.abi3.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/mistral_common/data/mistral_instruct_tokenizer_240216.model.v2 filter=lfs diff=lfs merge=lfs -text 
+venv/lib/python3.10/site-packages/mistral_common/data/mistral_instruct_tokenizer_240323.model.v3 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/vllm/third_party/__pycache__/pynvml.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/vllm/__pycache__/config.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/vllm/vllm_flash_attn/_vllm_fa2_C.abi3.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/ml_dtypes/_ml_dtypes_ext.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pycparser/__pycache__/yacctab.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/vllm/vllm_flash_attn/_vllm_fa3_C.abi3.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/docutils/utils/math/__pycache__/math2html.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/docutils/__pycache__/nodes.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/yaml/_yaml.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/cuda_nvcc/bin/ptxas filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/cuda_nvcc/nvvm/libdevice/libdevice.10.bc filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/cuda_nvcc/nvvm/lib64/libnvvm.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/cu13/lib/libcudadevrt.a filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/cu13/lib/libcudart.so.13 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/cu13/lib/libcudart_static.a filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/cu13/nvvm/libdevice/libdevice.10.bc filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/cu13/bin/fatbinary filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/cu13/bin/cudafe++ filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/cu13/bin/nvcc filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/cu13/bin/nvlink filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/cu13/bin/ptxas filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/cu13/nvvm/bin/cicc filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/cu13/lib/libnvvm.so.4 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/curand/lib/libcurand.so.10 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/cufft/lib/libcufftw.so.11 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/cufile/lib/libcufile.so.0 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/cublas/lib/libnvblas.so.12 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/cuda_nvrtc/lib/libnvrtc-builtins.so.12.4 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/cuda_nvrtc/lib/libnvrtc.so.12 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libcheckpoint.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libpcsamplingutil.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/cublas/lib/libcublas.so.12 
filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libcupti.so.12 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_target.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_host.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/nccl/lib/libnccl.so.2 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn.so.9 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_cnn.so.9 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/cufft/lib/libcufft.so.11 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_graph.so.9 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_engines_runtime_compiled.so.9 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/cusolver/lib/libcusolverMg.so.11 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/cusolver/lib/libcusolver.so.11 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_heuristic.so.9 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/nvjitlink/lib/libnvJitLink.so.12 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_ops.so.9 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/cuda_runtime/lib/libcudart.so.12 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/cusparse/lib/libcusparse.so.12 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/cublas/lib/libcublasLt.so.12 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_adv.so.9 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/frozenlist/_frozenlist.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/numpy/ma/tests/__pycache__/test_core.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/numpy/ma/__pycache__/core.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_engines_precompiled.so.9 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/numpy/linalg/_umath_linalg.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/numpy/linalg/__pycache__/_linalg.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/numpy/lib/tests/__pycache__/test_function_base.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/numpy/lib/__pycache__/_function_base_impl.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/numpy/random/_mt19937.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/numpy/random/bit_generator.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/numpy/random/_bounded_integers.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/numpy/random/_common.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text 
+venv/lib/python3.10/site-packages/numpy/random/mtrand.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/numpy/random/_pcg64.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/numpy/random/_philox.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/numpy/random/_generator.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/numpy/_core/_multiarray_umath.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/numpy/_core/_simd.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/venv/lib/python3.10/site-packages/_cffi_backend.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/_cffi_backend.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..aed2d1d92971e4e1671e0a17a9184f3cc081e00e --- /dev/null +++ b/venv/lib/python3.10/site-packages/_cffi_backend.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:770ca3f1c3a27ad0ad6f27d1ddbba31d48c719279ab6f91a3349286cf0145983 +size 339896 diff --git a/venv/lib/python3.10/site-packages/accelerate/__pycache__/accelerator.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/__pycache__/accelerator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..20a2c6038c08dd4aca6193ed7bd8d1c78479fabd --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/__pycache__/accelerator.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1bb566a80ddf2838844613b87ac7481d3d2975e603129391544b8b8579724584 +size 134031 diff --git a/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/dataclasses.cpython-310.pyc b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/dataclasses.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..961fdb078c649477693a4a4bb641c89e1a783813 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate/utils/__pycache__/dataclasses.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c45bcadc3f6404ccd8cb428d45006a07a51b5cf5dd70696cb746cb3322d4460 +size 105306 diff --git a/venv/lib/python3.10/site-packages/aiohttp/_http_parser.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/aiohttp/_http_parser.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..1d5f985f12327a28d87206244823dd253072ea86 --- /dev/null +++ b/venv/lib/python3.10/site-packages/aiohttp/_http_parser.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aed2e31e80c0f47db713b07783777a3a912721a58e8e5cd19cd9b874d828891a +size 2736176 diff --git a/venv/lib/python3.10/site-packages/aiohttp/_http_writer.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/aiohttp/_http_writer.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..160930c9b1d8beb7990e1ca1610a6db02532f298 --- /dev/null +++ b/venv/lib/python3.10/site-packages/aiohttp/_http_writer.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:64be0af6db218594034bb94838207e58f7dc620109b2c42d9c118b9480d84345 +size 476088 diff --git 
a/venv/lib/python3.10/site-packages/aiohttp/_websocket/mask.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/aiohttp/_websocket/mask.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..709deef054b3973dc66b7619cb88419a1a1ee36d --- /dev/null +++ b/venv/lib/python3.10/site-packages/aiohttp/_websocket/mask.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ba3b822c194b04a0b164296557a23b7f6b4dd1ffb9fc787e66074b593ed74f6 +size 220664 diff --git a/venv/lib/python3.10/site-packages/aiohttp/_websocket/reader_c.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/aiohttp/_websocket/reader_c.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..94115af386849d23fdfad4e1b9b07f998fba0566 --- /dev/null +++ b/venv/lib/python3.10/site-packages/aiohttp/_websocket/reader_c.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:84bb8ed741ef290d7575977ba13ade7133f7dc7e04cef7e6b72eabcdc9fcf736 +size 1694720 diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_layernorm_mainloop_fusion.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_layernorm_mainloop_fusion.h new file mode 100644 index 0000000000000000000000000000000000000000..3fe842a04096e85751a2a210ea5b5f9b458d8bbb --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_layernorm_mainloop_fusion.h @@ -0,0 +1,789 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! 
\file + \brief Template for a multistage GEMM kernel with layernorm operations fused in mainloop. +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/fast_math.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/complex.h" +#include "cutlass/semaphore.h" +#include "cutlass/gemm/kernel/params_universal_base.h" + +#include "cutlass/layout/matrix.h" + +#include "cutlass/trace.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate + typename Epilogue_, ///! Epilogue + typename ThreadblockSwizzle_ ///! Threadblock swizzling function +> +struct GemmLayernormMainloopFusion { +public: + + using Mma = Mma_; + using Epilogue = Epilogue_; + using EpilogueOutputOp = typename Epilogue::OutputOp; + using ThreadblockSwizzle = ThreadblockSwizzle_; + + using ElementA = typename Mma::IteratorA::Element; + using LayoutA = typename Mma::IteratorA::Layout; + using ElementB = typename Mma::IteratorB::Element; + using LayoutB = typename Mma::IteratorB::Layout; + using ElementC = typename Epilogue::OutputTileIterator::Element; + using LayoutC = typename Epilogue::OutputTileIterator::Layout; + + using ElementScaleBias = typename Mma::IteratorVarMean::Element; + using LayoutScaleBias = typename Mma::IteratorVarMean::Layout; + + static ComplexTransform const kTransformA = Mma::kTransformA; + static ComplexTransform const kTransformB = Mma::kTransformB; + using Operator = typename Mma::Operator; + + using OperatorClass = typename Mma::Operator::OperatorClass; + using ThreadblockShape = typename Mma::Shape; + using WarpShape = typename Mma::Operator::Shape; + using InstructionShape = typename Mma::Policy::Operator::InstructionShape; + using ArchTag = typename Mma::ArchTag; + + static int const kStages = Mma::kStages; + static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + + /// Warp count (concept: GemmShape) + using WarpCount = typename Mma::WarpCount; + static int const kThreadCount = 32 * WarpCount::kCount; + + /// Split-K preserves splits that are 128b aligned + static int const kSplitKAlignment = const_max(128 / sizeof_bits::value, 128 / sizeof_bits::value); + + // + // Structures + // + + /// Argument structure + struct Arguments : UniversalArgumentsBase + { + // + // Data members + // + + typename EpilogueOutputOp::Params epilogue; + + void const * ptr_A; + void const * ptr_B; + void const * ptr_var; + void const * ptr_mean; + void const * ptr_gamma; + void const * ptr_beta; + void const * ptr_C; + void * ptr_D; + + int64_t batch_stride_A; + int64_t batch_stride_B; + int64_t batch_stride_var; + int64_t batch_stride_mean; + int64_t batch_stride_gamma; + int64_t batch_stride_beta; + int64_t batch_stride_C; + + typename LayoutA::Stride stride_a; + typename LayoutB::Stride stride_b; + typename LayoutScaleBias::Stride stride_var; + typename LayoutScaleBias::Stride stride_mean; + typename LayoutScaleBias::Stride stride_gamma; + typename LayoutScaleBias::Stride stride_beta; + typename LayoutC::Stride stride_c; + typename LayoutC::Stride stride_d; + + typename LayoutA::Stride::LongIndex lda; + typename 
LayoutB::Stride::LongIndex ldb; + typename LayoutScaleBias::Stride::LongIndex ld_var; + typename LayoutScaleBias::Stride::LongIndex ld_mean; + typename LayoutScaleBias::Stride::LongIndex ld_gamma; + typename LayoutScaleBias::Stride::LongIndex ld_beta; + typename LayoutC::Stride::LongIndex ldc; + typename LayoutC::Stride::LongIndex ldd; + + int const * ptr_gather_A_indices; + int const * ptr_gather_B_indices; + int const * ptr_scatter_D_indices; + + // + // Methods + // + + Arguments(): + ptr_A(nullptr), ptr_B(nullptr), ptr_C(nullptr), ptr_D(nullptr), + ptr_var(nullptr), ptr_mean(nullptr), + ptr_gamma(nullptr), ptr_beta(nullptr), + ptr_gather_A_indices(nullptr), + ptr_gather_B_indices(nullptr), + ptr_scatter_D_indices(nullptr) + {} + + /// constructs an arguments structure + Arguments( + GemmUniversalMode mode, + GemmCoord problem_size, + int batch_count, + typename EpilogueOutputOp::Params epilogue, + void const * ptr_A, + void const * ptr_B, + void const * ptr_var, + void const * ptr_mean, + void const * ptr_gamma, + void const * ptr_beta, + void const * ptr_C, + void * ptr_D, + int64_t batch_stride_A, + int64_t batch_stride_B, + int64_t batch_stride_var, + int64_t batch_stride_mean, + int64_t batch_stride_gamma, + int64_t batch_stride_beta, + int64_t batch_stride_C, + int64_t batch_stride_D, + typename LayoutA::Stride stride_a, + typename LayoutB::Stride stride_b, + typename LayoutScaleBias::Stride stride_var, + typename LayoutScaleBias::Stride stride_mean, + typename LayoutScaleBias::Stride stride_gamma, + typename LayoutScaleBias::Stride stride_beta, + typename LayoutC::Stride stride_c, + typename LayoutC::Stride stride_d, + int const *ptr_gather_A_indices = nullptr, + int const *ptr_gather_B_indices = nullptr, + int const *ptr_scatter_D_indices = nullptr) + : + UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D), + epilogue(epilogue), + ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D), + ptr_var(ptr_var), ptr_mean(ptr_mean), + ptr_gamma(ptr_gamma), ptr_beta(ptr_beta), + batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C), + batch_stride_var(batch_stride_var), batch_stride_mean(batch_stride_mean), + batch_stride_gamma(batch_stride_gamma), batch_stride_beta(batch_stride_beta), + lda(0), ldb(0), ldc(0), ldd(0), + ld_var(0), ld_mean(0), + ld_gamma(0), ld_beta(0), + stride_a(stride_a), stride_b(stride_b), stride_c(stride_c), stride_d(stride_d), + stride_var(stride_var), stride_mean(stride_mean), + stride_gamma(stride_gamma), stride_beta(stride_beta), + ptr_gather_A_indices(ptr_gather_A_indices), ptr_gather_B_indices(ptr_gather_B_indices), + ptr_scatter_D_indices(ptr_scatter_D_indices) + { + CUTLASS_TRACE_HOST("GemmUniversal::Arguments::Arguments() - problem_size: " << problem_size); + } + + /// constructs an arguments structure + Arguments( + GemmUniversalMode mode, + GemmCoord problem_size, + int batch_count, + typename EpilogueOutputOp::Params epilogue, + void const * ptr_A, + void const * ptr_B, + void const * ptr_var, + void const * ptr_mean, + void const * ptr_gamma, + void const * ptr_beta, + void const * ptr_C, + void * ptr_D, + int64_t batch_stride_A, + int64_t batch_stride_B, + int64_t batch_stride_var, + int64_t batch_stride_mean, + int64_t batch_stride_gamma, + int64_t batch_stride_beta, + int64_t batch_stride_C, + int64_t batch_stride_D, + typename LayoutA::Stride::LongIndex lda, + typename LayoutB::Stride::LongIndex ldb, + typename LayoutScaleBias::Stride::LongIndex ld_var, + typename 
LayoutScaleBias::Stride::LongIndex ld_mean, + typename LayoutScaleBias::Stride::LongIndex ld_gamma, + typename LayoutScaleBias::Stride::LongIndex ld_beta, + typename LayoutC::Stride::LongIndex ldc, + typename LayoutC::Stride::LongIndex ldd, + int const *ptr_gather_A_indices = nullptr, + int const *ptr_gather_B_indices = nullptr, + int const *ptr_scatter_D_indices = nullptr) + : + UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D), + epilogue(epilogue), + ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D), + ptr_var(ptr_var), ptr_mean(ptr_mean), + ptr_gamma(ptr_gamma), ptr_beta(ptr_beta), + batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C), + batch_stride_var(batch_stride_var), batch_stride_mean(batch_stride_mean), + batch_stride_gamma(batch_stride_gamma), batch_stride_beta(batch_stride_beta), + lda(lda), ldb(ldb), ldc(ldc), ldd(ldd), + ld_var(ld_var), ld_mean(ld_mean), + ld_gamma(ld_gamma), ld_beta(ld_beta), + ptr_gather_A_indices(ptr_gather_A_indices), ptr_gather_B_indices(ptr_gather_B_indices), + ptr_scatter_D_indices(ptr_scatter_D_indices) + { + stride_a = make_Coord(lda); + stride_b = make_Coord(ldb); + stride_c = make_Coord(ldc); + stride_d = make_Coord(ldd); + stride_var = make_Coord(ld_var); + stride_mean = make_Coord(ld_mean); + stride_gamma = make_Coord(ld_gamma); + stride_beta = make_Coord(ld_beta); + CUTLASS_TRACE_HOST("GemmUniversal::Arguments::Arguments() - problem_size: " << problem_size); + } + + /// Returns arguments for the transposed problem + Arguments transposed_problem() const { + Arguments args(*this); + + std::swap(args.problem_size.m(), args.problem_size.n()); + std::swap(args.ptr_A, args.ptr_B); + std::swap(args.lda, args.ldb); + std::swap(args.stride_a, args.stride_b); + std::swap(args.batch_stride_A, args.batch_stride_B); + std::swap(args.ptr_gather_A_indices, args.ptr_gather_B_indices); + + return args; + } + }; + + + // + // Structure for precomputing values in host memory and passing to kernels + // + + /// Parameters structure + struct Params : UniversalParamsBase< + ThreadblockSwizzle, + ThreadblockShape, + ElementA, + ElementB, + ElementC, + LayoutA, + LayoutB> + { + using ParamsBase = UniversalParamsBase< + ThreadblockSwizzle, + ThreadblockShape, + ElementA, + ElementB, + ElementC, + LayoutA, + LayoutB>; + + // + // Data members + // + + typename Mma::IteratorA::Params params_A; + typename Mma::IteratorB::Params params_B; + typename Epilogue::OutputTileIterator::Params params_C; + typename Epilogue::OutputTileIterator::Params params_D; + + typename EpilogueOutputOp::Params output_op; + + void * ptr_A; + void * ptr_B; + void * ptr_var; + void * ptr_mean; + void * ptr_gamma; + void * ptr_beta; + void * ptr_C; + void * ptr_D; + + int64_t batch_stride_A; + int64_t batch_stride_B; + int64_t batch_stride_var; + int64_t batch_stride_mean; + int64_t batch_stride_gamma; + int64_t batch_stride_beta; + int64_t batch_stride_C; + + int * ptr_gather_A_indices; + int * ptr_gather_B_indices; + int * ptr_scatter_D_indices; + + // + // Host dispatch API + // + + /// Default constructor + Params() = default; + + /// Constructor + Params( + Arguments const &args, /// GEMM application arguments + int device_sms, /// Number of SMs on the device + int sm_occupancy) /// Kernel SM occupancy (in thread blocks) + : + ParamsBase(args, device_sms, sm_occupancy), + params_A(args.lda ? make_Coord_with_padding(args.lda) : args.stride_a), + params_B(args.ldb ? 
make_Coord_with_padding<LayoutB>(args.ldb) : args.stride_b), + params_C(args.ldc ? make_Coord_with_padding<LayoutC>(args.ldc) : args.stride_c), + params_D(args.ldd ? make_Coord_with_padding<LayoutC>(args.ldd) : args.stride_d), + output_op(args.epilogue), + ptr_A(const_cast<void *>(args.ptr_A)), + ptr_B(const_cast<void *>(args.ptr_B)), + ptr_var(const_cast<void *>(args.ptr_var)), + ptr_mean(const_cast<void *>(args.ptr_mean)), + ptr_gamma(const_cast<void *>(args.ptr_gamma)), + ptr_beta(const_cast<void *>(args.ptr_beta)), + ptr_C(const_cast<void *>(args.ptr_C)), + ptr_D(args.ptr_D), + batch_stride_A(args.batch_stride_A), + batch_stride_B(args.batch_stride_B), + batch_stride_var(args.batch_stride_var), + batch_stride_mean(args.batch_stride_mean), + batch_stride_gamma(args.batch_stride_gamma), + batch_stride_beta(args.batch_stride_beta), + batch_stride_C(args.batch_stride_C), + ptr_gather_A_indices(const_cast<int *>(args.ptr_gather_A_indices)), + ptr_gather_B_indices(const_cast<int *>(args.ptr_gather_B_indices)), + ptr_scatter_D_indices(const_cast<int *>(args.ptr_scatter_D_indices)) + {} + + /// Lightweight update given a subset of arguments. + void update(Arguments const &args) + { + ptr_A = const_cast<void *>(args.ptr_A); + ptr_B = const_cast<void *>(args.ptr_B); + ptr_var = const_cast<void *>(args.ptr_var); + ptr_mean = const_cast<void *>(args.ptr_mean); + ptr_gamma = const_cast<void *>(args.ptr_gamma); + ptr_beta = const_cast<void *>(args.ptr_beta); + ptr_C = const_cast<void *>(args.ptr_C); + ptr_D = args.ptr_D; + + batch_stride_A = args.batch_stride_A; + batch_stride_B = args.batch_stride_B; + batch_stride_C = args.batch_stride_C; + batch_stride_var = args.batch_stride_var; + batch_stride_mean = args.batch_stride_mean; + batch_stride_gamma = args.batch_stride_gamma; + batch_stride_beta = args.batch_stride_beta; + this->batch_stride_D = args.batch_stride_D; + + ptr_gather_A_indices = const_cast<int *>(args.ptr_gather_A_indices); + ptr_gather_B_indices = const_cast<int *>(args.ptr_gather_B_indices); + ptr_scatter_D_indices = const_cast<int *>(args.ptr_scatter_D_indices); + + output_op = args.epilogue; + + CUTLASS_TRACE_HOST("GemmUniversal::Params::update()"); + } + }; + + + /// Shared memory storage structure + union SharedStorage { + typename Mma::SharedStorage main_loop; + typename Epilogue::SharedStorage epilogue; + }; + +public: + + // + // Host dispatch API + // + + /// Determines whether kernel satisfies alignment + static Status can_implement( + cutlass::gemm::GemmCoord const & problem_size) { + + CUTLASS_TRACE_HOST("GemmUniversal::can_implement()"); + + static int const kAlignmentA = (platform::is_same<LayoutA, layout::ColumnMajorInterleaved<32>>::value) + ? 32 + : (platform::is_same<LayoutA, layout::ColumnMajorInterleaved<64>>::value) + ? 64 + : Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = (platform::is_same<LayoutB, layout::RowMajorInterleaved<32>>::value) + ? 32 + : (platform::is_same<LayoutB, layout::RowMajorInterleaved<64>>::value) + ? 64 + : Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = (platform::is_same<LayoutC, layout::ColumnMajorInterleaved<32>>::value) + ? 32 + : (platform::is_same<LayoutC, layout::ColumnMajorInterleaved<64>>::value) + ?
64 + : Epilogue::OutputTileIterator::kElementsPerAccess; + + bool isAMisaligned = false; + bool isBMisaligned = false; + bool isCMisaligned = false; + + if (platform::is_same::value) { + isAMisaligned = problem_size.k() % kAlignmentA; + } else if (platform::is_same::value) { + isAMisaligned = problem_size.m() % kAlignmentA; + } else if (platform::is_same>::value + || platform::is_same>::value) { + isAMisaligned = problem_size.k() % kAlignmentA; + } + + if (platform::is_same::value) { + isBMisaligned = problem_size.n() % kAlignmentB; + } else if (platform::is_same::value) { + isBMisaligned = problem_size.k() % kAlignmentB; + } else if (platform::is_same>::value + || platform::is_same>::value) { + isBMisaligned = problem_size.k() % kAlignmentB; + } + + if (platform::is_same::value) { + isCMisaligned = problem_size.n() % kAlignmentC; + } else if (platform::is_same::value) { + isCMisaligned = problem_size.m() % kAlignmentC; + } else if (platform::is_same>::value + || platform::is_same>::value) { + isCMisaligned = problem_size.n() % kAlignmentC; + } + + if (isAMisaligned) { + CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for A operand"); + return Status::kErrorMisalignedOperand; + } + + if (isBMisaligned) { + CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for B operand"); + return Status::kErrorMisalignedOperand; + } + + if (isCMisaligned) { + CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for C operand"); + return Status::kErrorMisalignedOperand; + } + + CUTLASS_TRACE_HOST(" returning kSuccess"); + + return Status::kSuccess; + } + + static Status can_implement(Arguments const &args) { + return can_implement(args.problem_size); + } + +public: + + // + // Device-only API + // + + // Factory invocation + CUTLASS_DEVICE + static void invoke( + Params const ¶ms, + SharedStorage &shared_storage) + { + GemmLayernormMainloopFusion op; + op(params, shared_storage); + } + + + /// Executes one GEMM + CUTLASS_DEVICE + void operator()(Params const ¶ms, SharedStorage &shared_storage) { + + // Compute threadblock location + ThreadblockSwizzle threadblock_swizzle; + + cutlass::gemm::GemmCoord threadblock_tile_offset = + threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + // Early exit if CTA is out of range + if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() || + params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { + + return; + } + + int offset_k = 0; + int problem_size_k = params.problem_size.k(); + + ElementA *ptr_A = static_cast(params.ptr_A); + ElementB *ptr_B = static_cast(params.ptr_B); + + // + // Fetch pointers based on mode. 
+ // + if (params.mode == GemmUniversalMode::kGemm || + params.mode == GemmUniversalMode::kGemmSplitKParallel) { + + if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) { + + problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size; + } + + offset_k = threadblock_tile_offset.k() * params.gemm_k_size; + } + else if (params.mode == GemmUniversalMode::kBatched) { + ptr_A += threadblock_tile_offset.k() * params.batch_stride_A; + ptr_B += threadblock_tile_offset.k() * params.batch_stride_B; + } + else if (params.mode == GemmUniversalMode::kArray) { + ptr_A = static_cast(params.ptr_A)[threadblock_tile_offset.k()]; + ptr_B = static_cast(params.ptr_B)[threadblock_tile_offset.k()]; + } + + __syncthreads(); + + // Compute initial location in logical coordinates + cutlass::MatrixCoord tb_offset_A{ + threadblock_tile_offset.m() * Mma::Shape::kM, + offset_k, + }; + + cutlass::MatrixCoord tb_offset_B{ + offset_k, + threadblock_tile_offset.n() * Mma::Shape::kN + }; + + // Compute position within threadblock + int thread_idx = threadIdx.x; + + // Construct iterators to A and B operands + typename Mma::IteratorA iterator_A( + params.params_A, + ptr_A, + {params.problem_size.m(), problem_size_k}, + thread_idx, + tb_offset_A, + params.ptr_gather_A_indices); + + typename Mma::IteratorB iterator_B( + params.params_B, + ptr_B, + {problem_size_k, params.problem_size.n()}, + thread_idx, + tb_offset_B, + params.ptr_gather_B_indices); + + // Construct iterators to A var/mean vector + typename Mma::IteratorVarMean iterator_var_mean( + params.problem_size.m(), + static_cast(params.ptr_var), + static_cast(params.ptr_mean), + thread_idx, + MatrixCoord(0, (threadblock_tile_offset.m() * Mma::Shape::kM)) + ); + + // Construct iterators to A scale/bias vector + typename Mma::IteratorGammaBeta iterator_gamma_beta( + problem_size_k, + static_cast(params.ptr_gamma), + static_cast(params.ptr_beta), + thread_idx, + MatrixCoord( + 0, (threadblock_tile_offset.k() * Mma::Shape::kK) + ) + ); + + // Broadcast the warp_id computed by lane 0 to ensure dependent code + // is compiled as warp-uniform. + int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0); + + int lane_idx = threadIdx.x % 32; + + // + // Main loop + // + + // Construct thread-scoped matrix multiply + Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); + + typename Mma::FragmentC accumulators; + + accumulators.clear(); + + // Compute threadblock-scoped matrix multiply-add + int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK; + + // Compute threadblock-scoped matrix multiply-add + mma( + gemm_k_iterations, + accumulators, + iterator_A, + iterator_B, + iterator_var_mean, + iterator_gamma_beta, + accumulators); + + // + // Epilogue + // + + EpilogueOutputOp output_op(params.output_op); + + // + // Masked tile iterators constructed from members + // + + threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + //assume identity swizzle + MatrixCoord threadblock_offset( + threadblock_tile_offset.m() * Mma::Shape::kM, + threadblock_tile_offset.n() * Mma::Shape::kN + ); + + int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m(); + + ElementC *ptr_C = static_cast(params.ptr_C); + ElementC *ptr_D = static_cast(params.ptr_D); + + // + // Fetch pointers based on mode. + // + + // Construct the semaphore. 
+ Semaphore semaphore(params.semaphore + block_idx, thread_idx); + + if (params.mode == GemmUniversalMode::kGemm) { + + // If performing a reduction via split-K, fetch the initial synchronization + if (params.grid_tiled_shape.k() > 1) { + + // Fetch the synchronization lock initially but do not block. + semaphore.fetch(); + + // Indicate which position in a serial reduction the output operator is currently updating + output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k()); + } + } + else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) { + ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; + } + else if (params.mode == GemmUniversalMode::kBatched) { + ptr_C += threadblock_tile_offset.k() * params.batch_stride_C; + ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; + } + else if (params.mode == GemmUniversalMode::kArray) { + ptr_C = static_cast(params.ptr_C)[threadblock_tile_offset.k()]; + ptr_D = static_cast(params.ptr_D)[threadblock_tile_offset.k()]; + } + + // Tile iterator loading from source tensor. + typename Epilogue::OutputTileIterator iterator_C( + params.params_C, + ptr_C, + params.problem_size.mn(), + thread_idx, + threadblock_offset, + params.ptr_scatter_D_indices + ); + + // Tile iterator writing to destination tensor. + typename Epilogue::OutputTileIterator iterator_D( + params.params_D, + ptr_D, + params.problem_size.mn(), + thread_idx, + threadblock_offset, + params.ptr_scatter_D_indices + ); + + Epilogue epilogue( + shared_storage.epilogue, + thread_idx, + warp_idx, + lane_idx); + + // Wait on the semaphore - this latency may have been covered by iterator construction + if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { + + // For subsequent threadblocks, the source matrix is held in the 'D' tensor. + if (threadblock_tile_offset.k()) { + iterator_C = iterator_D; + } + + semaphore.wait(threadblock_tile_offset.k()); + } + + // Execute the epilogue operator to update the destination tensor. + epilogue( + output_op, + iterator_D, + accumulators, + iterator_C); + + // + // Release the semaphore + // + + if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { + + int lock = 0; + if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) { + + // The final threadblock resets the semaphore for subsequent grids. + lock = 0; + } + else { + // Otherwise, the semaphore is incremented + lock = threadblock_tile_offset.k() + 1; + } + + semaphore.release(lock); + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_params.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_params.h new file mode 100644 index 0000000000000000000000000000000000000000..046ad7596cb17fc334cb5b9cfbe8f7923c8046a9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_params.h @@ -0,0 +1,199 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/fast_math.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/complex.h" +#include "cutlass/semaphore.h" +#include "cutlass/transform/threadblock/predicated_tile_iterator.h" +#include "cutlass/epilogue/threadblock/predicated_tile_iterator_params.h" +#include "cutlass/transform/threadblock/predicated_tile_access_iterator_params.h" + +#include "cutlass/trace.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +struct GemmParams { + + // + // Type definitions + // + using Index = int32_t; + using LongIndex = int64_t; + + using MmaIteratorParams = typename cutlass::transform::threadblock::PredicatedTileAccessIteratorParams; + using EpilogueIteratorParams = typename cutlass::epilogue::threadblock::PredicatedTileIteratorParams; + + // + // Data members + // + + cutlass::gemm::GemmCoord problem_size; + cutlass::gemm::GemmCoord grid_tiled_shape; + int swizzle_log_tile; + + // Data members for Mma::Iterator::Params + MmaIteratorParams params_itr_a; + MmaIteratorParams params_itr_b; + + // Data member for Epilogue::OutputTileIterator::Params + EpilogueIteratorParams params_itr_c; + EpilogueIteratorParams params_itr_d; + + + GemmUniversalMode mode; + int batch_count; + int gemm_k_size; + + void * ptr_A; + void * ptr_B; + void * ptr_C; + void * ptr_D; + + LongIndex lda; + LongIndex ldb; + LongIndex ldc; + LongIndex ldd; + + LongIndex batch_stride_A; + LongIndex batch_stride_B; + LongIndex batch_stride_C; + LongIndex batch_stride_D; + + int *semaphore; + + // + // Methods + // + + CUTLASS_HOST_DEVICE + GemmParams() {} + + 
CUTLASS_HOST_DEVICE + GemmParams( + cutlass::gemm::GemmCoord problem_size_, + cutlass::gemm::GemmCoord grid_tiled_shape_, + int swizzle_log_tile_, + GemmUniversalMode mode_, + int batch_count_, + int gemm_k_size_, + void const * ptr_A_, + void const * ptr_B_, + void const * ptr_C_, + void * ptr_D_, + LongIndex lda_, + LongIndex ldb_, + LongIndex ldc_, + LongIndex ldd_, + int64_t batch_stride_A_, + int64_t batch_stride_B_, + int64_t batch_stride_C_, + int64_t batch_stride_D_, + MmaIteratorParams const & params_itr_a_, + MmaIteratorParams const & params_itr_b_, + EpilogueIteratorParams const & params_itr_c_, + EpilogueIteratorParams const & params_itr_d_, + void *workspace_ = nullptr) : + problem_size(problem_size_), + grid_tiled_shape(grid_tiled_shape_), + swizzle_log_tile(swizzle_log_tile_), + mode(mode_), + batch_count(batch_count_), + gemm_k_size(gemm_k_size_), + ptr_A(const_cast(ptr_A_)), + ptr_B(const_cast(ptr_B_)), + ptr_C(const_cast(ptr_C_)), + ptr_D(ptr_D_), + lda(lda_), + ldb(ldb_), + ldc(ldc_), + ldd(ldd_), + batch_stride_A(batch_stride_A_), + batch_stride_B(batch_stride_B_), + batch_stride_C(batch_stride_C_), + batch_stride_D(batch_stride_D_), + params_itr_a(params_itr_a_), + params_itr_b(params_itr_b_), + params_itr_c(params_itr_c_), + params_itr_d(params_itr_d_), + semaphore(static_cast(workspace_) + ) { } + + + CUTLASS_HOST_DEVICE + void update( + void const * ptr_A_, + void const * ptr_B_, + void const * ptr_C_, + void * ptr_D_, + int64_t batch_stride_A_, + int64_t batch_stride_B_, + int64_t batch_stride_C_, + int64_t batch_stride_D_, + void *workspace_ = nullptr) { + + ptr_A = const_cast(ptr_A_); + ptr_B = const_cast(ptr_B_); + ptr_C = const_cast(ptr_C_); + ptr_D = ptr_D_; + + batch_stride_A = batch_stride_A_; + batch_stride_B = batch_stride_B_; + batch_stride_C = batch_stride_C_; + batch_stride_D = batch_stride_D_; + + + semaphore = static_cast(workspace_); + CUTLASS_TRACE_HOST("GemmParams::update()"); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_pipelined.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_pipelined.h new file mode 100644 index 0000000000000000000000000000000000000000..900e04428f192ded4c2fead25ac4d156644954fd --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_pipelined.h @@ -0,0 +1,158 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Template for a pipelined GEMM kernel. Does not compute batching or support split-K. +*/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/aligned_buffer.h" +#include "cutlass/array.h" + +#include "cutlass/numeric_types.h" +#include "cutlass/matrix_shape.h" + +#include "cutlass/gemm/gemm.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +__global__ void GemmPipelined( + cutlass::gemm::GemmCoord problem_size, + cutlass::gemm::GemmCoord grid_tiled_shape, + typename Mma::IteratorA::Params params_A, + typename Mma::IteratorA::TensorRef ref_A, + typename Mma::IteratorB::Params params_B, + typename Mma::IteratorB::TensorRef ref_B, + typename Epilogue::Params params_epilogue + ) { + + // Shared storage needed by threadblock-scoped matrix multiply-accumulate + __shared__ union { + typename Mma::SharedStorage main_loop; + typename Epilogue::SharedStorage epilogue; + } shared_storage; + + // Compute threadblock location + ThreadblockSwizzle threadblock_swizzle; + + int swizzle_log_tile = ThreadblockSwizzle().get_log_tile(grid_tiled_shape); + + cutlass::gemm::GemmCoord tb_tile_offset = threadblock_swizzle.get_tile_offset(swizzle_log_tile); + + if (grid_tiled_shape.m() <= tb_tile_offset.m() || + grid_tiled_shape.n() <= tb_tile_offset.n()) { + + return; + } + + // Compute initial location in logical coordinates + cutlass::MatrixCoord tb_offset_A{ + tb_tile_offset.m() * Mma::Shape::kM, + tb_tile_offset.k() + }; + + cutlass::MatrixCoord tb_offset_B{ + tb_tile_offset.k(), + tb_tile_offset.n() * Mma::Shape::kN + }; + + // Compute position within threadblock + int tb_thread_id = threadIdx.x; + + // Construct iterators to A and B operands + typename Mma::IteratorA iterator_A( + params_A, + ref_A.data(), + {problem_size.m(), problem_size.k()}, + tb_thread_id, + tb_offset_A); + + typename Mma::IteratorB iterator_B( + params_B, + ref_B.data(), + {problem_size.k(), problem_size.n()}, + tb_thread_id, + tb_offset_B); + + int warp_id = canonical_warp_idx_sync(); + int lane_id = threadIdx.x % 32; + + // + // Main loop + // + + // Construct thread-scoped matrix multiply + Mma mma(shared_storage.main_loop, tb_thread_id, warp_id, lane_id); + + typename Mma::FragmentC 
accumulators; + + accumulators.clear(); + + // Compute threadblock-scoped matrix multiply-add + mma(problem_size, accumulators, iterator_A, iterator_B, accumulators); + + // + // Epilogue + // + + Epilogue epilogue( + params_epilogue, + shared_storage.epilogue, + tb_thread_id, + warp_id, + lane_id); + + tb_tile_offset = threadblock_swizzle.get_tile_offset(swizzle_log_tile); + + //assume identity swizzle + MatrixCoord threadblock_offset( + tb_tile_offset.m() * Mma::Shape::kM, + tb_tile_offset.n() * Mma::Shape::kN + ); + + // run efficient epilogue + epilogue({problem_size.m(), problem_size.n()}, accumulators, threadblock_offset); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_planar_complex.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_planar_complex.h new file mode 100644 index 0000000000000000000000000000000000000000..6987d7e691a52694846b82bae73057a8e15153c9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_planar_complex.h @@ -0,0 +1,727 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! 
\file + \brief +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/fast_math.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/complex.h" +#include "cutlass/semaphore.h" +#include "cutlass/gemm/kernel/params_universal_base.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate + typename Epilogue_, ///! Epilogue + typename ThreadblockSwizzle_ ///! Threadblock swizzling function +> +struct GemmPlanarComplex { +public: + + using Mma = Mma_; + using Epilogue = Epilogue_; + using EpilogueOutputOp = typename Epilogue::OutputOp; + using ThreadblockSwizzle = ThreadblockSwizzle_; + + using ElementA = typename Mma::IteratorA::Element; + using LayoutA = typename Mma::IteratorA::Layout; + using ElementB = typename Mma::IteratorB::Element; + using LayoutB = typename Mma::IteratorB::Layout; + using ElementC = typename Epilogue::OutputTileIterator::Element; + using LayoutC = typename Epilogue::OutputTileIterator::Layout; + using Operator = typename Mma::Operator; + using ArchTag = typename Mma::ArchTag; + + static ComplexTransform const kTransformA = Mma::kTransformA; + static ComplexTransform const kTransformB = Mma::kTransformB; + + /// Warp count (concept: GemmShape) + using WarpCount = typename Mma::WarpCount; + static int const kThreadCount = 32 * WarpCount::kCount; + + /// Split-K preserves splits that are 128b aligned + static int const kSplitKAlignment = const_max( + 128 / sizeof_bits::value, + 128 / sizeof_bits::value); + + // + // Additional types needed for reflection + // + + using ElementAccumulator = typename Mma::Policy::Operator::ElementC; + using OperatorClass = typename Mma::Operator::OperatorClass; + using ThreadblockShape = typename Mma::Shape; + using WarpShape = typename Mma::Operator::Shape; + using InstructionShape = typename Mma::Policy::Operator::Shape; + + static int const kStages = Mma::kStages; + + static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + + // + // Arguments structure + // + + /// Argument structure + struct Arguments : UniversalArgumentsBase + { + // + // Data members + // + + typename EpilogueOutputOp::Params epilogue; + + void const * ptr_A_real; + void const * ptr_A_imag; + + void const * ptr_B_real; + void const * ptr_B_imag; + + void const * ptr_C_real; + void const * ptr_C_imag; + + void * ptr_D_real; + void * ptr_D_imag; + + typename LayoutA::Stride::Index lda_real; + typename LayoutA::Stride::Index lda_imag; + typename LayoutB::Stride::Index ldb_real; + typename LayoutB::Stride::Index ldb_imag; + typename LayoutC::Stride::Index ldc_real; + typename LayoutC::Stride::Index ldc_imag; + typename LayoutC::Stride::Index ldd_real; + typename LayoutC::Stride::Index ldd_imag; + + int64_t batch_stride_A; + int64_t batch_stride_A_imag; + int64_t batch_stride_B; + int64_t batch_stride_B_imag; + int64_t batch_stride_C; + int64_t batch_stride_C_imag; + int64_t batch_stride_D_imag; + + // + // Methods + // + + Arguments() : + ptr_A_real(nullptr), + ptr_A_imag(nullptr), + ptr_B_real(nullptr), + ptr_B_imag(nullptr), + ptr_C_real(nullptr), + 
ptr_C_imag(nullptr), + ptr_D_real(nullptr), + ptr_D_imag(nullptr) + {} + + /// constructs an arguments structure + Arguments( + GemmUniversalMode mode, + GemmCoord problem_size, + int batch_count, + typename EpilogueOutputOp::Params epilogue, + void const * ptr_A_real, + void const * ptr_A_imag, + void const * ptr_B_real, + void const * ptr_B_imag, + void const * ptr_C_real, + void const * ptr_C_imag, + void * ptr_D_real, + void * ptr_D_imag, + typename LayoutA::Stride::Index lda_real, + typename LayoutA::Stride::Index lda_imag, + typename LayoutB::Stride::Index ldb_real, + typename LayoutB::Stride::Index ldb_imag, + typename LayoutC::Stride::Index ldc_real, + typename LayoutC::Stride::Index ldc_imag, + typename LayoutC::Stride::Index ldd_real, + typename LayoutC::Stride::Index ldd_imag, + int64_t batch_stride_A = 0, + int64_t batch_stride_A_imag = 0, + int64_t batch_stride_B = 0, + int64_t batch_stride_B_imag = 0, + int64_t batch_stride_C = 0, + int64_t batch_stride_C_imag = 0, + int64_t batch_stride_D = 0, + int64_t batch_stride_D_imag = 0) + : + UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D), + epilogue(epilogue), + ptr_A_real(ptr_A_real), + ptr_A_imag(ptr_A_imag), + ptr_B_real(ptr_B_real), + ptr_B_imag(ptr_B_imag), + ptr_C_real(ptr_C_real), + ptr_C_imag(ptr_C_imag), + ptr_D_real(ptr_D_real), + ptr_D_imag(ptr_D_imag), + lda_real(lda_real), + lda_imag(lda_imag), + ldb_real(ldb_real), + ldb_imag(ldb_imag), + ldc_real(ldc_real), + ldc_imag(ldc_imag), + ldd_real(ldd_real), + ldd_imag(ldd_imag), + batch_stride_A(batch_stride_A), + batch_stride_A_imag(batch_stride_A_imag), + batch_stride_B(batch_stride_B), + batch_stride_B_imag(batch_stride_B_imag), + batch_stride_C(batch_stride_C), + batch_stride_C_imag(batch_stride_C_imag), + batch_stride_D_imag(batch_stride_D_imag) + {} + + /// Returns arguments for the transposed problem + Arguments transposed_problem() const { + Arguments args(*this); + + std::swap(args.problem_size.m(), args.problem_size.n()); + std::swap(args.ptr_A_real, args.ptr_B_real); + std::swap(args.ptr_A_imag, args.ptr_B_imag); + std::swap(args.lda_real, args.ldb_real); + std::swap(args.lda_imag, args.ldb_imag); + std::swap(args.batch_stride_A, args.batch_stride_B); + std::swap(args.batch_stride_A_imag, args.batch_stride_B_imag); + + return args; + } + }; + + + // + // Structure for precomputing values in host memory and passing to kernels + // + + /// Parameters structure + struct Params : UniversalParamsBase< + ThreadblockSwizzle, + ThreadblockShape, + ElementA, + ElementB, + ElementC, + LayoutA, + LayoutB> + { + using ParamsBase = UniversalParamsBase< + ThreadblockSwizzle, + ThreadblockShape, + ElementA, + ElementB, + ElementC, + LayoutA, + LayoutB>; + + // + // Data members + // + + typename Mma::IteratorA::Params params_A_real; + typename Mma::IteratorA::Params params_A_imag; + typename Mma::IteratorB::Params params_B_real; + typename Mma::IteratorB::Params params_B_imag; + typename Epilogue::OutputTileIterator::Params params_C_real; + typename Epilogue::OutputTileIterator::Params params_C_imag; + typename Epilogue::OutputTileIterator::Params params_D_real; + typename Epilogue::OutputTileIterator::Params params_D_imag; + + typename EpilogueOutputOp::Params output_op; + + void * ptr_A_real; + void * ptr_A_imag; + void * ptr_B_real; + void * ptr_B_imag; + void * ptr_C_real; + void * ptr_C_imag; + void * ptr_D_real; + void * ptr_D_imag; + + int64_t batch_stride_A; + int64_t batch_stride_B; + int64_t batch_stride_C; + + int64_t batch_stride_A_imag; + 
int64_t batch_stride_B_imag;
+    int64_t batch_stride_C_imag;
+    int64_t batch_stride_D_imag;
+
+    //
+    // Host dispatch API
+    //
+
+    /// Default constructor
+    Params() = default;
+
+    /// Constructor
+    Params(
+      Arguments const &args,  /// GEMM application arguments
+      int device_sms,         /// Number of SMs on the device
+      int sm_occupancy)       /// Kernel SM occupancy (in thread blocks)
+    :
+      ParamsBase(args, device_sms, sm_occupancy),
+      params_A_real(args.lda_real),
+      params_A_imag(args.lda_imag),
+      params_B_real(args.ldb_real),
+      params_B_imag(args.ldb_imag),
+      params_C_real(args.ldc_real),
+      params_C_imag(args.ldc_imag),
+      params_D_real(args.ldd_real),
+      params_D_imag(args.ldd_imag),
+      output_op(args.epilogue),
+      ptr_A_real(const_cast<void *>(args.ptr_A_real)),
+      ptr_A_imag(const_cast<void *>(args.ptr_A_imag)),
+      ptr_B_real(const_cast<void *>(args.ptr_B_real)),
+      ptr_B_imag(const_cast<void *>(args.ptr_B_imag)),
+      ptr_C_real(const_cast<void *>(args.ptr_C_real)),
+      ptr_C_imag(const_cast<void *>(args.ptr_C_imag)),
+      ptr_D_real(args.ptr_D_real),
+      ptr_D_imag(args.ptr_D_imag),
+      batch_stride_A(args.batch_stride_A),
+      batch_stride_B(args.batch_stride_B),
+      batch_stride_C(args.batch_stride_C),
+      batch_stride_A_imag(args.batch_stride_A_imag),
+      batch_stride_B_imag(args.batch_stride_B_imag),
+      batch_stride_C_imag(args.batch_stride_C_imag),
+      batch_stride_D_imag(args.batch_stride_D_imag)
+    {}
+
+    /// Returns the workspace size (in bytes) needed for this problem geometry
+    size_t get_workspace_size() const
+    {
+      size_t workspace_bytes = ParamsBase::get_workspace_size();
+      if (this->mode == GemmUniversalMode::kGemmSplitKParallel)
+      {
+        // Double the size returned by the base class because we need to
+        // accumulate two ElementC components
+        workspace_bytes *= 2;
+      }
+
+      return workspace_bytes;
+    }
+
+    /// Lightweight update given a subset of arguments.
+    void update(Arguments const &args)
+    {
+      ptr_A_real = const_cast<void *>(args.ptr_A_real);
+      ptr_A_imag = const_cast<void *>(args.ptr_A_imag);
+
+      ptr_B_real = const_cast<void *>(args.ptr_B_real);
+      ptr_B_imag = const_cast<void *>(args.ptr_B_imag);
+
+      ptr_C_real = const_cast<void *>(args.ptr_C_real);
+      ptr_C_imag = const_cast<void *>(args.ptr_C_imag);
+
+      ptr_D_real = const_cast<void *>(args.ptr_D_real);
+      ptr_D_imag = const_cast<void *>(args.ptr_D_imag);
+
+      batch_stride_A = args.batch_stride_A;
+      batch_stride_B = args.batch_stride_B;
+      batch_stride_C = args.batch_stride_C;
+      this->batch_stride_D = args.batch_stride_D;
+      batch_stride_A_imag = args.batch_stride_A_imag;
+      batch_stride_B_imag = args.batch_stride_B_imag;
+      batch_stride_C_imag = args.batch_stride_C_imag;
+      batch_stride_D_imag = args.batch_stride_D_imag;
+
+      output_op = args.epilogue;
+    }
+  };
+
+
+  /// Shared memory storage structure
+  union SharedStorage {
+    typename Mma::SharedStorage main_loop;
+    typename Epilogue::SharedStorage epilogue;
+  };
+
+public:
+
+  //
+  // Host dispatch API
+  //
+
+  /// Determines whether kernel satisfies alignment
+  static Status can_implement(Arguments const &args)
+  {
+    static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
+    static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
+    static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
+
+    bool isAMisaligned = false;
+    bool isBMisaligned = false;
+    bool isCMisaligned = false;
+
+    if (platform::is_same<LayoutA, layout::RowMajor>::value) {
+      isAMisaligned = args.problem_size.k() % kAlignmentA;
+    } else if (platform::is_same<LayoutA, layout::ColumnMajor>::value) {
+      isAMisaligned = args.problem_size.m() % kAlignmentA;
+    }
+
+    if (platform::is_same<LayoutB, layout::RowMajor>::value) {
+      isBMisaligned = args.problem_size.n() % kAlignmentB;
+    } else if (platform::is_same<LayoutB, layout::ColumnMajor>::value) {
+      isBMisaligned = args.problem_size.k() % kAlignmentB;
+    }
+
+    if (platform::is_same<LayoutC, layout::RowMajor>::value) {
+      isCMisaligned = args.problem_size.n() % kAlignmentC;
+    } else if (platform::is_same<LayoutC, layout::ColumnMajor>::value) {
+      isCMisaligned = args.problem_size.m() % kAlignmentC;
+    }
+
+    if (isAMisaligned || isBMisaligned || isCMisaligned) {
+      return Status::kErrorMisalignedOperand;
+    }
+
+    return Status::kSuccess;
+  }
+
+public:
+
+  //
+  // Device-only API
+  //
+
+  // Factory invocation
+  CUTLASS_DEVICE
+  static void invoke(
+    Params const &params,
+    SharedStorage &shared_storage)
+  {
+    GemmPlanarComplex op;
+    op(params, shared_storage);
+  }
+
+
+  /// Executes one GEMM
+  CUTLASS_DEVICE
+  void operator()(Params const &params, SharedStorage &shared_storage) {
+
+    // Compute threadblock location
+    ThreadblockSwizzle threadblock_swizzle;
+
+    cutlass::gemm::GemmCoord threadblock_tile_offset =
+        threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
+
+    // Early exit if CTA is out of range
+    if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
+      params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
+
+      return;
+    }
+
+    int offset_k = 0;
+    int problem_size_k = params.problem_size.k();
+
+    ElementA *ptr_A_real = static_cast<ElementA *>(params.ptr_A_real);
+    ElementA *ptr_A_imag = static_cast<ElementA *>(params.ptr_A_imag);
+
+    ElementB *ptr_B_real = static_cast<ElementB *>(params.ptr_B_real);
+    ElementB *ptr_B_imag = static_cast<ElementB *>(params.ptr_B_imag);
+
+    //
+    // Fetch pointers based on mode.
+    //
+    if (params.mode == GemmUniversalMode::kGemm ||
+      params.mode == GemmUniversalMode::kGemmSplitKParallel) {
+
+      if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) {
+
+        problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size;
+      }
+
+      offset_k = threadblock_tile_offset.k() * params.gemm_k_size;
+    }
+    else if (params.mode == GemmUniversalMode::kBatched) {
+      ptr_A_real += int64_t(threadblock_tile_offset.k()) * params.batch_stride_A;
+      ptr_A_imag += int64_t(threadblock_tile_offset.k()) * params.batch_stride_A_imag;
+      ptr_B_real += int64_t(threadblock_tile_offset.k()) * params.batch_stride_B;
+      ptr_B_imag += int64_t(threadblock_tile_offset.k()) * params.batch_stride_B_imag;
+    }
+    else if (params.mode == GemmUniversalMode::kArray) {
+      ptr_A_real = static_cast<ElementA * const *>(params.ptr_A_real)[threadblock_tile_offset.k()];
+      ptr_A_imag = static_cast<ElementA * const *>(params.ptr_A_imag)[threadblock_tile_offset.k()];
+      ptr_B_real = static_cast<ElementB * const *>(params.ptr_B_real)[threadblock_tile_offset.k()];
+      ptr_B_imag = static_cast<ElementB * const *>(params.ptr_B_imag)[threadblock_tile_offset.k()];
+    }
+
+    __syncthreads();
+
+    // Compute initial location in logical coordinates
+    cutlass::MatrixCoord tb_offset_A{
+      threadblock_tile_offset.m() * Mma::Shape::kM,
+      offset_k,
+    };
+
+    cutlass::MatrixCoord tb_offset_B{
+      offset_k,
+      threadblock_tile_offset.n() * Mma::Shape::kN
+    };
+
+
+    // Compute position within threadblock
+    int thread_idx = threadIdx.x;
+
+    // Construct iterators to A and B operands
+    typename Mma::IteratorA iterator_A_real(
+      params.params_A_real,
+      ptr_A_real,
+      {params.problem_size.m(), problem_size_k},
+      thread_idx,
+      tb_offset_A);
+
+    typename Mma::IteratorA iterator_A_imag(
+      params.params_A_imag,
+      ptr_A_imag,
+      {params.problem_size.m(), problem_size_k},
+      thread_idx,
+      tb_offset_A);
+
+    typename Mma::IteratorB iterator_B_real(
+      params.params_B_real,
+      ptr_B_real,
+      {problem_size_k, params.problem_size.n()},
+      thread_idx,
+      tb_offset_B);
+
+    typename Mma::IteratorB iterator_B_imag(
+      params.params_B_imag,
+      ptr_B_imag,
+      {problem_size_k, params.problem_size.n()},
+      thread_idx,
+      tb_offset_B);
+
+    // Broadcast the warp_id computed by lane 0 to ensure dependent code
+    // is compiled as warp-uniform.
+    int warp_idx = canonical_warp_idx_sync();
+
+    int lane_idx = threadIdx.x % 32;
+
+    //
+    // Main loop
+    //
+
+    // Construct thread-scoped matrix multiply
+    Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
+
+    typename Mma::FragmentC accumulators;
+
+    accumulators.clear();
+
+    // Compute threadblock-scoped matrix multiply-add
+    int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK;
+
+    // Compute threadblock-scoped matrix multiply-add
+    mma(
+      gemm_k_iterations,
+      accumulators,
+      iterator_A_real,
+      iterator_A_imag,
+      iterator_B_real,
+      iterator_B_imag,
+      accumulators);
+
+    //
+    // Epilogue
+    //
+
+    EpilogueOutputOp output_op(params.output_op);
+
+    //
+    // Masked tile iterators constructed from members
+    //
+
+    threadblock_tile_offset =
+        threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
+
+    //assume identity swizzle
+    MatrixCoord threadblock_offset(
+      threadblock_tile_offset.m() * Mma::Shape::kM,
+      threadblock_tile_offset.n() * Mma::Shape::kN
+    );
+
+    int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
+
+    ElementC *ptr_C_real = static_cast<ElementC *>(params.ptr_C_real);
+    ElementC *ptr_C_imag = static_cast<ElementC *>(params.ptr_C_imag);
+    ElementC *ptr_D_real = static_cast<ElementC *>(params.ptr_D_real);
+    ElementC *ptr_D_imag = static_cast<ElementC *>(params.ptr_D_imag);
+
+    //
+    // Fetch pointers based on mode.
+    //
+
+    // Construct the semaphore.
+    Semaphore semaphore(params.semaphore + block_idx, thread_idx);
+
+    if (params.mode == GemmUniversalMode::kGemm) {
+
+      // If performing a reduction via split-K, fetch the initial synchronization
+      if (params.grid_tiled_shape.k() > 1) {
+
+        // Fetch the synchronization lock initially but do not block.
+        semaphore.fetch();
+
+        // Indicate which position in a serial reduction the output operator is currently updating
+        output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
+      }
+    }
+    else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) {
+      ptr_D_real += threadblock_tile_offset.k() * params.batch_stride_D;
+      ptr_D_imag += threadblock_tile_offset.k() * params.batch_stride_D_imag;
+    }
+    else if (params.mode == GemmUniversalMode::kBatched) {
+      ptr_C_real += int64_t(threadblock_tile_offset.k()) * params.batch_stride_C;
+      ptr_C_imag += int64_t(threadblock_tile_offset.k()) * params.batch_stride_C_imag;
+      ptr_D_real += int64_t(threadblock_tile_offset.k()) * params.batch_stride_D;
+      ptr_D_imag += int64_t(threadblock_tile_offset.k()) * params.batch_stride_D_imag;
+    }
+    else if (params.mode == GemmUniversalMode::kArray) {
+      ptr_C_real = static_cast<ElementC * const *>(params.ptr_C_real)[threadblock_tile_offset.k()];
+      ptr_C_imag = static_cast<ElementC * const *>(params.ptr_C_imag)[threadblock_tile_offset.k()];
+      ptr_D_real = static_cast<ElementC * const *>(params.ptr_D_real)[threadblock_tile_offset.k()];
+      ptr_D_imag = static_cast<ElementC * const *>(params.ptr_D_imag)[threadblock_tile_offset.k()];
+    }
+
+    // Tile iterator loading from source tensor.
+    typename Epilogue::OutputTileIterator iterator_C_real(
+      params.params_C_real,
+      ptr_C_real,
+      params.problem_size.mn(),
+      thread_idx,
+      threadblock_offset
+    );
+
+    typename Epilogue::OutputTileIterator iterator_C_imag(
+      params.params_C_imag,
+      ptr_C_imag,
+      params.problem_size.mn(),
+      thread_idx,
+      threadblock_offset
+    );
+
+    // Tile iterator writing to destination tensor.
+ typename Epilogue::OutputTileIterator iterator_D_real( + params.params_D_real, + ptr_D_real, + params.problem_size.mn(), + thread_idx, + threadblock_offset + ); + + typename Epilogue::OutputTileIterator iterator_D_imag( + params.params_D_imag, + ptr_D_imag, + params.problem_size.mn(), + thread_idx, + threadblock_offset + ); + + // + // Construct epilogue + // + + Epilogue epilogue( + shared_storage.epilogue, + thread_idx, + warp_idx, + lane_idx); + + // Wait on the semaphore - this latency may have been covered by iterator construction + if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { + + // For subsequent threadblocks, the source matrix is held in the 'D' tensor. + if (threadblock_tile_offset.k()) { + iterator_C_real = iterator_D_real; + iterator_C_imag = iterator_D_imag; + } + + semaphore.wait(threadblock_tile_offset.k()); + + __threadfence(); + } + + + // Execute the epilogue operator to update the destination tensor. + epilogue( + output_op, + iterator_D_real, + iterator_D_imag, + accumulators, + iterator_C_real, + iterator_C_imag); + + // + // Release the semaphore + // + + if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { + + int lock = 0; + if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) { + + // The final threadblock resets the semaphore for subsequent grids. + lock = 0; + } + else { + // Otherwise, the semaphore is incremented + lock = threadblock_tile_offset.k() + 1; + } + + semaphore.release(lock); + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_planar_complex_array.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_planar_complex_array.h new file mode 100644 index 0000000000000000000000000000000000000000..6a3aa11c1d2bf6e30e0faf16e5eac71fc1953957 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_planar_complex_array.h @@ -0,0 +1,621 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/fast_math.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/complex.h" +#include "cutlass/semaphore.h" +#include "cutlass/gemm/kernel/params_universal_base.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate + typename Epilogue_, ///! Epilogue + typename ThreadblockSwizzle_ ///! Threadblock swizzling function +> +struct GemmPlanarComplexArray { +public: + + using Mma = Mma_; + using Epilogue = Epilogue_; + using EpilogueOutputOp = typename Epilogue::OutputOp; + using ThreadblockSwizzle = ThreadblockSwizzle_; + + using ElementA = typename Mma::IteratorA::Element; + using LayoutA = typename Mma::IteratorA::Layout; + using ElementB = typename Mma::IteratorB::Element; + using LayoutB = typename Mma::IteratorB::Layout; + using ElementC = typename Epilogue::OutputTileIterator::Element; + using LayoutC = typename Epilogue::OutputTileIterator::Layout; + using Operator = typename Mma::Operator; + using ArchTag = typename Mma::ArchTag; + + static ComplexTransform const kTransformA = Mma::kTransformA; + static ComplexTransform const kTransformB = Mma::kTransformB; + + /// Warp count (concept: GemmShape) + using WarpCount = typename Mma::WarpCount; + static int const kThreadCount = 32 * WarpCount::kCount; + + /// Split-K preserves splits that are 128b aligned + static int const kSplitKAlignment = const_max( + 128 / sizeof_bits::value, + 128 / sizeof_bits::value); + + // + // Additional types needed for reflection + // + + using ElementAccumulator = typename Mma::Policy::Operator::ElementC; + using OperatorClass = typename Mma::Operator::OperatorClass; + using ThreadblockShape = typename Mma::Shape; + using WarpShape = typename Mma::Operator::Shape; + using InstructionShape = typename Mma::Policy::Operator::Shape; + + static int const kStages = Mma::kStages; + + static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + + // + // Arguments structure + // + + /// Argument structure + struct Arguments : UniversalArgumentsBase + { + // + // Data members + // + + typename EpilogueOutputOp::Params epilogue; + + int const *ptr_M; + int const *ptr_N; + int const *ptr_K; + + void const * const * ptr_A_real; + void const * const * ptr_A_imag; + + void const * const * ptr_B_real; + void const * const * ptr_B_imag; + + void const * const * ptr_C_real; + void const * const * ptr_C_imag; + + void * 
const * ptr_D_real; + void * const * ptr_D_imag; + + typename LayoutA::Stride::Index lda_real; + typename LayoutA::Stride::Index lda_imag; + typename LayoutB::Stride::Index ldb_real; + typename LayoutB::Stride::Index ldb_imag; + typename LayoutC::Stride::Index ldc_real; + typename LayoutC::Stride::Index ldc_imag; + typename LayoutC::Stride::Index ldd_real; + typename LayoutC::Stride::Index ldd_imag; + + // + // Methods + // + + Arguments(): + ptr_M(nullptr), + ptr_N(nullptr), + ptr_K(nullptr), + ptr_A_real(nullptr), + ptr_A_imag(nullptr), + ptr_B_real(nullptr), + ptr_B_imag(nullptr), + ptr_C_real(nullptr), + ptr_C_imag(nullptr), + ptr_D_real(nullptr), + ptr_D_imag(nullptr) + {} + + /// constructs an arguments structure + Arguments( + GemmCoord problem_size, + int batch_count, + typename EpilogueOutputOp::Params epilogue, + int const *ptr_M, + int const *ptr_N, + int const *ptr_K, + void const * const * ptr_A_real, + void const * const * ptr_A_imag, + void const * const * ptr_B_real, + void const * const * ptr_B_imag, + void const * const * ptr_C_real, + void const * const * ptr_C_imag, + void * const * ptr_D_real, + void * const * ptr_D_imag, + typename LayoutA::Stride::Index lda_real, + typename LayoutA::Stride::Index lda_imag, + typename LayoutB::Stride::Index ldb_real, + typename LayoutB::Stride::Index ldb_imag, + typename LayoutC::Stride::Index ldc_real, + typename LayoutC::Stride::Index ldc_imag, + typename LayoutC::Stride::Index ldd_real, + typename LayoutC::Stride::Index ldd_imag) + : + UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D), + epilogue(epilogue), + ptr_M(ptr_M), + ptr_N(ptr_N), + ptr_K(ptr_K), + ptr_A_real(ptr_A_real), + ptr_A_imag(ptr_A_imag), + ptr_B_real(ptr_B_real), + ptr_B_imag(ptr_B_imag), + ptr_C_real(ptr_C_real), + ptr_C_imag(ptr_C_imag), + ptr_D_real(ptr_D_real), + ptr_D_imag(ptr_D_imag), + lda_real(lda_real), + lda_imag(lda_imag), + ldb_real(ldb_real), + ldb_imag(ldb_imag), + ldc_real(ldc_real), + ldc_imag(ldc_imag), + ldd_real(ldd_real), + ldd_imag(ldd_imag) + {} + + /// Returns arguments for the transposed problem + Arguments transposed_problem() const { + Arguments args(*this); + + std::swap(args.problem_size.m(), args.problem_size.n()); + std::swap(args.ptr_M, args.ptr_N); + std::swap(args.ptr_A_real, args.ptr_B_real); + std::swap(args.ptr_A_imag, args.ptr_B_imag); + std::swap(args.lda_real, args.ldb_real); + std::swap(args.lda_imag, args.ldb_imag); + + return args; + } + }; + + + // + // Structure for precomputing values in host memory and passing to kernels + // + + /// Parameters structure + struct Params : UniversalParamsBase< + ThreadblockSwizzle, + ThreadblockShape, + ElementA, + ElementB, + ElementC, + LayoutA, + LayoutB> + { + using ParamsBase = UniversalParamsBase< + ThreadblockSwizzle, + ThreadblockShape, + ElementA, + ElementB, + ElementC, + LayoutA, + LayoutB>; + + // + // Data members + // + + typename Mma::IteratorA::Params params_A_real; + typename Mma::IteratorA::Params params_A_imag; + typename Mma::IteratorB::Params params_B_real; + typename Mma::IteratorB::Params params_B_imag; + typename Epilogue::OutputTileIterator::Params params_C_real; + typename Epilogue::OutputTileIterator::Params params_C_imag; + typename Epilogue::OutputTileIterator::Params params_D_real; + typename Epilogue::OutputTileIterator::Params params_D_imag; + + typename EpilogueOutputOp::Params output_op; + + int const *ptr_M; + int const *ptr_N; + int const *ptr_K; + + void const * const * ptr_A_real; + void const * const * ptr_A_imag; + void const 
* const * ptr_B_real; + void const * const * ptr_B_imag; + void const * const * ptr_C_real; + void const * const * ptr_C_imag; + void * const * ptr_D_real; + void * const * ptr_D_imag; + + // + // Host dispatch API + // + + /// Default constructor + Params() = default; + + /// Constructor + Params( + Arguments const &args, /// GEMM application arguments + int device_sms, /// Number of SMs on the device + int sm_occupancy) /// Kernel SM occupancy (in thread blocks) + : + ParamsBase(args, device_sms, sm_occupancy), + ptr_M(args.ptr_M), + ptr_N(args.ptr_N), + ptr_K(args.ptr_K), + params_A_real(args.lda_real), + params_A_imag(args.lda_imag), + params_B_real(args.ldb_real), + params_B_imag(args.ldb_imag), + params_C_real(args.ldc_real), + params_C_imag(args.ldc_imag), + params_D_real(args.ldd_real), + params_D_imag(args.ldd_imag), + output_op(args.epilogue), + ptr_A_real(args.ptr_A_real), + ptr_A_imag(args.ptr_A_imag), + ptr_B_real(args.ptr_B_real), + ptr_B_imag(args.ptr_B_imag), + ptr_C_real(args.ptr_C_real), + ptr_C_imag(args.ptr_C_imag), + ptr_D_real(args.ptr_D_real), + ptr_D_imag(args.ptr_D_imag) + {} + + /// Lightweight update given a subset of arguments. + void update(Arguments const &args) + { + ptr_M = args.ptr_M; + ptr_N = args.ptr_N; + ptr_K = args.ptr_K; + + ptr_A_real = args.ptr_A_real; + ptr_A_imag = args.ptr_A_imag; + + ptr_B_real = args.ptr_B_real; + ptr_B_imag = args.ptr_B_imag; + + ptr_C_real = args.ptr_C_real; + ptr_C_imag = args.ptr_C_imag; + + ptr_D_real = args.ptr_D_real; + ptr_D_imag = args.ptr_D_imag; + + output_op = args.epilogue; + } + }; + + + /// Shared memory storage structure + union SharedStorage { + typename Mma::SharedStorage main_loop; + typename Epilogue::SharedStorage epilogue; + }; + +public: + + // + // Host dispatch API + // + + /// Determines whether kernel satisfies alignment + static Status can_implement(Arguments const &args) { + + static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + + bool isAMisaligned = false; + bool isBMisaligned = false; + bool isCMisaligned = false; + + if (platform::is_same::value) { + isAMisaligned = args.problem_size.k() % kAlignmentA; + } else if (platform::is_same::value) { + isAMisaligned = args.problem_size.m() % kAlignmentA; + } + + if (platform::is_same::value) { + isBMisaligned = args.problem_size.n() % kAlignmentB; + } else if (platform::is_same::value) { + isBMisaligned = args.problem_size.k() % kAlignmentB; + } + + if (platform::is_same::value) { + isCMisaligned = args.problem_size.n() % kAlignmentC; + } else if (platform::is_same::value) { + isCMisaligned = args.problem_size.m() % kAlignmentC; + } + + if (isAMisaligned || isBMisaligned || isCMisaligned) { + return Status::kErrorMisalignedOperand; + } + + return Status::kSuccess; + } + + +public: + + // + // Device-only API + // + + // Factory invocation + CUTLASS_DEVICE + static void invoke( + Params const ¶ms, + SharedStorage &shared_storage) + { + GemmPlanarComplexArray op; + op(params, shared_storage); + } + + + /// Executes one GEMM + CUTLASS_DEVICE + void operator()(Params const ¶ms, SharedStorage &shared_storage) { + + // Compute threadblock location + ThreadblockSwizzle threadblock_swizzle; + + cutlass::gemm::GemmCoord threadblock_tile_offset = + threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + // Early exit if CTA is out of range + if (params.grid_tiled_shape.m() <= 
threadblock_tile_offset.m() ||
+      params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
+
+      return;
+    }
+
+    int batch_idx = threadblock_tile_offset.k();
+
+    int problem_size_m = params.problem_size.m();
+    int problem_size_n = params.problem_size.n();
+    int problem_size_k = params.problem_size.k();
+
+    ElementA *ptr_A_real = static_cast<ElementA *>(const_cast<void *>(params.ptr_A_real[batch_idx]));
+    ElementA *ptr_A_imag = static_cast<ElementA *>(const_cast<void *>(params.ptr_A_imag[batch_idx]));
+
+    ElementB *ptr_B_real = static_cast<ElementB *>(const_cast<void *>(params.ptr_B_real[batch_idx]));
+    ElementB *ptr_B_imag = static_cast<ElementB *>(const_cast<void *>(params.ptr_B_imag[batch_idx]));
+
+    //
+    // If pointers for problem sizes are specified, these are loaded from global memory
+    //
+
+    if (params.ptr_M) {
+      problem_size_m = params.ptr_M[batch_idx];
+    }
+
+    if (params.ptr_N) {
+      problem_size_n = params.ptr_N[batch_idx];
+    }
+
+    if (params.ptr_K) {
+      problem_size_k = params.ptr_K[batch_idx];
+    }
+
+    int const kBlockCountM = (problem_size_m + Mma::Shape::kM - 1) / Mma::Shape::kM;
+    int const kBlockCountN = (problem_size_n + Mma::Shape::kN - 1) / Mma::Shape::kN;
+
+    int const kGemmKIterations = (problem_size_k + Mma::Shape::kK - 1) / Mma::Shape::kK;
+
+    //
+    // Each threadblock loops over the logical problem size which the kernel may have discovered
+    // after the grid is launched.
+    //
+
+    CUTLASS_PRAGMA_NO_UNROLL
+    for (int block_m = threadblock_tile_offset.m();
+      block_m < kBlockCountM;
+      block_m += params.grid_tiled_shape.m()) {
+
+      CUTLASS_PRAGMA_NO_UNROLL
+      for (int block_n = threadblock_tile_offset.n();
+        block_n < kBlockCountN;
+        block_n += params.grid_tiled_shape.n()) {
+
+        //
+        // Compute indices within threadblock and warp.
+        //
+        int thread_idx = threadIdx.x;
+
+        // Broadcast the warp_id computed by lane 0 to ensure dependent code
+        // is compiled as warp-uniform.
+        int warp_idx = canonical_warp_idx_sync();
+        int lane_idx = threadIdx.x % 32;
+
+        //
+        // Proceed with regular GEMM logic.
+        //
+
+        // Compute initial location in logical coordinates
+        cutlass::MatrixCoord tb_offset_A{ block_m * Mma::Shape::kM, 0};
+        cutlass::MatrixCoord tb_offset_B{ 0, block_n * Mma::Shape::kN };
+
+        // Construct iterators to A and B operands
+        typename Mma::IteratorA iterator_A_real(
+          params.params_A_real,
+          ptr_A_real,
+          {problem_size_m, problem_size_k},
+          thread_idx,
+          tb_offset_A);
+
+        typename Mma::IteratorA iterator_A_imag(
+          params.params_A_imag,
+          ptr_A_imag,
+          {problem_size_m, problem_size_k},
+          thread_idx,
+          tb_offset_A);
+
+        typename Mma::IteratorB iterator_B_real(
+          params.params_B_real,
+          ptr_B_real,
+          {problem_size_k, problem_size_n},
+          thread_idx,
+          tb_offset_B);
+
+        typename Mma::IteratorB iterator_B_imag(
+          params.params_B_imag,
+          ptr_B_imag,
+          {problem_size_k, problem_size_n},
+          thread_idx,
+          tb_offset_B);
+
+        //
+        // Main loop
+        //
+
+        // Construct thread-scoped matrix multiply
+        Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
+
+        typename Mma::FragmentC accumulators;
+
+        accumulators.clear();
+
+        // Compute threadblock-scoped matrix multiply-add
+        mma(
+          kGemmKIterations,
+          accumulators,
+          iterator_A_real,
+          iterator_A_imag,
+          iterator_B_real,
+          iterator_B_imag,
+          accumulators);
+
+        //
+        // Epilogue
+        //
+
+        EpilogueOutputOp output_op(params.output_op);
+
+        //
+        // Masked tile iterators constructed from members
+        //
+
+        //assume identity swizzle
+        MatrixCoord threadblock_offset(
+          block_m * Mma::Shape::kM,
+          block_n * Mma::Shape::kN
+        );
+
+        ElementC *ptr_C_real = static_cast<ElementC *>(const_cast<void *>(params.ptr_C_real[batch_idx]));
+        ElementC *ptr_C_imag = static_cast<ElementC *>(const_cast<void *>(params.ptr_C_imag[batch_idx]));
+        ElementC *ptr_D_real = static_cast<ElementC *>(params.ptr_D_real[batch_idx]);
+        ElementC *ptr_D_imag = static_cast<ElementC *>(params.ptr_D_imag[batch_idx]);
+
+        // Tile iterator loading from source tensor.
+        typename Epilogue::OutputTileIterator iterator_C_real(
+          params.params_C_real,
+          ptr_C_real,
+          {problem_size_m, problem_size_n},
+          thread_idx,
+          threadblock_offset
+        );
+
+        typename Epilogue::OutputTileIterator iterator_C_imag(
+          params.params_C_imag,
+          ptr_C_imag,
+          {problem_size_m, problem_size_n},
+          thread_idx,
+          threadblock_offset
+        );
+
+        // Tile iterator writing to destination tensor.
+        typename Epilogue::OutputTileIterator iterator_D_real(
+          params.params_D_real,
+          ptr_D_real,
+          {problem_size_m, problem_size_n},
+          thread_idx,
+          threadblock_offset
+        );
+
+        typename Epilogue::OutputTileIterator iterator_D_imag(
+          params.params_D_imag,
+          ptr_D_imag,
+          {problem_size_m, problem_size_n},
+          thread_idx,
+          threadblock_offset
+        );
+
+        //
+        // Construct epilogue
+        //
+
+        Epilogue epilogue(
+          shared_storage.epilogue,
+          thread_idx,
+          warp_idx,
+          lane_idx);
+
+        // Execute the epilogue operator to update the destination tensor.
+ epilogue( + output_op, + iterator_D_real, + iterator_D_imag, + accumulators, + iterator_C_real, + iterator_C_imag); + + + } // for block_n + } // for block_m + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_splitk_parallel.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_splitk_parallel.h new file mode 100644 index 0000000000000000000000000000000000000000..ffb928c32c332a58663f2ffde7f4c6729de01665 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_splitk_parallel.h @@ -0,0 +1,253 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Template for GEMM performing a reduction over K partitions in parallel. +*/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_coord.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate + typename Epilogue_, ///! Epilogue + typename ThreadblockSwizzle_ ///! 
Threadblock swizzling function +> +struct GemmSplitKParallel { + + using Mma = Mma_; + using Epilogue = Epilogue_; + using OutputOp = typename Epilogue::OutputOp; + using ThreadblockSwizzle = ThreadblockSwizzle_; + + /// Warp count (concept: GemmShape) + using WarpCount = typename Mma::WarpCount; + static int const kThreadCount = 32 * WarpCount::kCount; + + static int const kAlignmentK = Mma::Operator::Shape::kK; + + /// Parameters structure + struct Params { + cutlass::gemm::GemmCoord problem_size; + cutlass::gemm::GemmCoord grid_tiled_shape; + int swizzle_log_tile; + typename Mma::IteratorA::Params params_A; + typename Mma::IteratorA::TensorRef ref_A; + typename Mma::IteratorB::Params params_B; + typename Mma::IteratorB::TensorRef ref_B; + typename Epilogue::OutputTileIterator::Params params_D; + typename Epilogue::OutputTileIterator::TensorRef ref_D; + typename OutputOp::Params output_op; + int64_t splitk_slice_stride; + int gemm_k_size; + + // + // Methods + // + + CUTLASS_HOST_DEVICE + Params(): swizzle_log_tile(0) { } + + CUTLASS_HOST_DEVICE + Params( + cutlass::gemm::GemmCoord const & problem_size, + cutlass::gemm::GemmCoord const & grid_tiled_shape, + typename Mma::IteratorA::TensorRef ref_A, + typename Mma::IteratorB::TensorRef ref_B, + typename Epilogue::OutputTileIterator::TensorRef ref_D, + typename OutputOp::Params output_op, + int64_t splitk_slice_stride + ): + problem_size(problem_size), + grid_tiled_shape(grid_tiled_shape), + swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)), + params_A(ref_A.layout()), + ref_A(ref_A), + params_B(ref_B.layout()), + ref_B(ref_B), + params_D(ref_D.layout()), + ref_D(ref_D), + output_op(output_op), + splitk_slice_stride(splitk_slice_stride) { + + int full_gemm_k_iterations = problem_size.k() / Mma::Shape::kK; + int gemm_k_iterations = full_gemm_k_iterations / grid_tiled_shape.k(); + + gemm_k_size = gemm_k_iterations * Mma::Shape::kK; + } + }; + + /// Shared memory storage structure + union SharedStorage { + typename Mma::SharedStorage main_loop; + typename Epilogue::SharedStorage epilogue; + }; + + // + // Methods + // + + CUTLASS_HOST_DEVICE + GemmSplitKParallel() { } + + /// Executes one GEMM + CUTLASS_DEVICE + void operator()(Params const ¶ms, SharedStorage &shared_storage) { + + // Compute threadblock location + ThreadblockSwizzle threadblock_swizzle; + + cutlass::gemm::GemmCoord threadblock_tile_offset = + threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + // Early exit if CTA is out of range + if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() || + params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { + + return; + } + + // Compute initial location in logical coordinates + cutlass::MatrixCoord tb_offset_A{ + threadblock_tile_offset.m() * Mma::Shape::kM, + threadblock_tile_offset.k() * params.gemm_k_size, + }; + + cutlass::MatrixCoord tb_offset_B{ + threadblock_tile_offset.k() * params.gemm_k_size, + threadblock_tile_offset.n() * Mma::Shape::kN + }; + + // Problem size is a function of threadblock index in the K dimension + int problem_size_k; + if (threadblock_tile_offset.k() + 1 == params.grid_tiled_shape.k()) { + problem_size_k = params.problem_size.k(); + } + else { + problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size; + } + + // Compute threadblock-scoped matrix multiply-add + int gemm_k_iterations = (problem_size_k - tb_offset_A.column() + Mma::Shape::kK - 1) / Mma::Shape::kK; + + // Compute position within threadblock + int thread_idx = threadIdx.x; + + // 
Construct iterators to A and B operands + typename Mma::IteratorA iterator_A( + params.params_A, + params.ref_A.data(), + {params.problem_size.m(), problem_size_k}, + thread_idx, + tb_offset_A); + + typename Mma::IteratorB iterator_B( + params.params_B, + params.ref_B.data(), + {problem_size_k, params.problem_size.n()}, + thread_idx, + tb_offset_B); + + int warp_idx = threadIdx.x / 32; + int lane_idx = threadIdx.x % 32; + + + // + // Main loop + // + + // Construct thread-scoped matrix multiply + Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); + + typename Mma::FragmentC accumulators; + + accumulators.clear(); + + mma(gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators); + + // + // Epilogue + // + + OutputOp output_op(params.output_op); + + // + // Masked tile iterators constructed from members + // + + threadblock_tile_offset = + threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + //assume identity swizzle + MatrixCoord threadblock_offset( + threadblock_tile_offset.m() * Mma::Shape::kM, + threadblock_tile_offset.n() * Mma::Shape::kN + ); + + // Tile iterator writing to output tile + typename Epilogue::OutputTileIterator iterator_D( + params.params_D, + params.ref_D.data(), + params.problem_size.mn(), + thread_idx, + threadblock_offset + ); + + iterator_D.add_pointer_offset(params.splitk_slice_stride * threadblock_tile_offset.k()); + + // Execute the epilogue + Epilogue epilogue( + shared_storage.epilogue, + thread_idx, + warp_idx, + lane_idx); + + // Run efficient epilogue + epilogue(output_op, iterator_D, accumulators, iterator_D); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_streamk_with_fused_epilogue.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_streamk_with_fused_epilogue.h new file mode 100644 index 0000000000000000000000000000000000000000..6d6714d805ccbcf2d009391c6d5e8703127e98e3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_streamk_with_fused_epilogue.h @@ -0,0 +1,2405 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Stream-K Gemm kernel compatible with fused epilogues + that broadcast a bias vector over the MMA output. +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/fast_math.h" +#include "cutlass/layout/layout.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/complex.h" +#include "cutlass/barrier.h" +#include "cutlass/block_striped.h" +#include "cutlass/semaphore.h" + +#include "cutlass/trace.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate + typename Epilogue_, ///! Epilogue + typename ThreadblockSwizzle_, ///! Threadblock swizzling function + bool IsSingleSource = Epilogue_::kIsSingleSource +> +struct GemmStreamkWithFusedEpilogue; + +// GemmStreamkWithFusedEpilogue with two sources +template < + typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate + typename Epilogue_, ///! Epilogue + typename ThreadblockSwizzle_ ///! 
Threadblock swizzling function +> +struct GemmStreamkWithFusedEpilogue { + using Mma = Mma_; + using Epilogue = Epilogue_; + using EpilogueOutputOp = typename Epilogue::OutputOp; + using ThreadblockSwizzle = ThreadblockSwizzle_; + + using ElementA = typename Mma::IteratorA::Element; + using LayoutA = typename Mma::IteratorA::Layout; + using ElementB = typename Mma::IteratorB::Element; + using LayoutB = typename Mma::IteratorB::Layout; + using ElementC = typename Epilogue::OutputTileIterator::Element; + using LayoutC = typename Epilogue::OutputTileIterator::Layout; + + /// The per-thread tile of raw accumulators + using AccumulatorTile = typename Mma::FragmentC; + + static ComplexTransform const kTransformA = Mma::kTransformA; + static ComplexTransform const kTransformB = Mma::kTransformB; + using Operator = typename Mma::Operator; + + using OperatorClass = typename Mma::Operator::OperatorClass; + using ThreadblockShape = typename Mma::Shape; + using WarpShape = typename Mma::Operator::Shape; + using InstructionShape = typename Mma::Policy::Operator::InstructionShape; + using ArchTag = typename Mma::ArchTag; + + static int const kStages = Mma::kStages; + static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + + /// Warp count (concept: GemmShape) + using WarpCount = typename Mma::WarpCount; + static int const kThreadCount = 32 * WarpCount::kCount; + + /// Workspace bytes per thread block + static size_t const kWorkspaceBytesPerBlock = + __NV_STD_MAX( + kThreadCount * sizeof(AccumulatorTile), + Epilogue::kWorkspaceBytesPerBlock); + + /// Block-striped reduction utility + using BlockStripedReduceT = BlockStripedReduce; + + + + // + // Structures + // + + /// Argument structure + struct Arguments { + + // + // Data members + // + + GemmUniversalMode mode; + GemmCoord problem_size; + int batch_count; // Either (mode == GemmUniversalMode::kBatched) the batch count, or (mode == GemmUniversalMode::kGemm) the tile-splitting factor + + typename EpilogueOutputOp::Params epilogue; + + void const * ptr_A; + void const * ptr_B; + void const * ptr_C1; + void const * ptr_C2; + void * ptr_D; + + void * ptr_Vector; + void * ptr_Tensor; + + int64_t batch_stride_A; + int64_t batch_stride_B; + int64_t batch_stride_C1; + int64_t batch_stride_C2; + int64_t batch_stride_D; + int64_t batch_stride_Vector; + int64_t batch_stride_Tensor; + + typename LayoutA::Stride::Index lda; + typename LayoutB::Stride::Index ldb; + typename LayoutC::Stride::Index ldc1; + typename LayoutC::Stride::Index ldc2; + typename LayoutC::Stride::Index ldd; + typename LayoutC::Stride::Index ldr; + typename LayoutC::Stride::Index ldt; + + int avail_sms; /// The number of SMs that StreamK dispatch heuristics will attempt to load-balance across (-1 defaults to device width, 1 implies classic data-parallel scheduling) + + + // + // Methods + // + + /// Default Constructor + Arguments(): + mode(GemmUniversalMode::kGemm), + batch_count(1), + ptr_A(nullptr), + ptr_B(nullptr), + ptr_C1(nullptr), + ptr_C2(nullptr), + ptr_D(nullptr), + avail_sms(-1) + {} + + /// constructs an arguments structure + Arguments( + GemmUniversalMode mode, + GemmCoord problem_size, + int batch_split, /// Either (mode == GemmUniversalMode::kBatched) the batch count, or (mode == GemmUniversalMode::kGemm) the tile-splitting factor (1 defaults to StreamK, >1 emulates Split-K) + typename 
EpilogueOutputOp::Params epilogue, + void const * ptr_A, + void const * ptr_B, + void const * ptr_C1, + void const * ptr_C2, + void * ptr_D, + void * ptr_Vector, + void * ptr_Tensor, + int64_t batch_stride_A, + int64_t batch_stride_B, + int64_t batch_stride_C1, + int64_t batch_stride_C2, + int64_t batch_stride_D, + int64_t batch_stride_Vector, + int64_t batch_stride_Tensor, + typename LayoutA::Stride::Index lda, + typename LayoutB::Stride::Index ldb, + typename LayoutC::Stride::Index ldc1, + typename LayoutC::Stride::Index ldc2, + typename LayoutC::Stride::Index ldd, + typename LayoutC::Stride::Index ldr, + typename LayoutC::Stride::Index ldt, + int avail_sms = -1) /// The number of SMs that StreamK dispatch heuristics will attempt to load-balance across (-1 defaults to device width, 1 implies classic data-parallel scheduling) + : + mode(mode), + problem_size(problem_size), + batch_count(batch_split), + epilogue(epilogue), + ptr_A(ptr_A), ptr_B(ptr_B), ptr_C1(ptr_C1), ptr_C2(ptr_C2), ptr_D(ptr_D), + ptr_Vector(ptr_Vector), + ptr_Tensor(ptr_Tensor), + batch_stride_A(batch_stride_A), + batch_stride_B(batch_stride_B), + batch_stride_C1(batch_stride_C1), + batch_stride_C2(batch_stride_C2), + batch_stride_Vector(batch_stride_Vector), + batch_stride_Tensor(batch_stride_Tensor), + lda(lda), ldb(ldb), ldc1(ldc1), ldc2(ldc2), ldd(ldd), ldr(ldr), ldt(ldt), avail_sms(avail_sms) + { + CUTLASS_TRACE_HOST("GemmStreamkWithFusedEpilogue::Arguments::Arguments() - problem_size: " << problem_size); + CUTLASS_TRACE_HOST(" ptr_Vector: " << (void *)this->ptr_Vector); + CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor); + CUTLASS_TRACE_HOST(" ldr: " << this->ldr); + CUTLASS_TRACE_HOST(" ldt: " << this->ldt); + CUTLASS_TRACE_HOST(" avail_sms: " << this->avail_sms); + } + + /// Returns arguments for the transposed problem + Arguments transposed_problem() const { + Arguments args(*this); + + std::swap(args.problem_size.m(), args.problem_size.n()); + std::swap(args.ptr_A, args.ptr_B); + std::swap(args.lda, args.ldb); + std::swap(args.batch_stride_A, args.batch_stride_B); + + return args; + } + }; + + + /// Parameters structure + struct Params + { + public: + + // + // Data members + // + + void * ptr_A; + void * ptr_B; + + typename Mma::IteratorA::Params params_A; + typename Mma::IteratorB::Params params_B; + + int64_t batch_stride_A; + int64_t batch_stride_B; + + GemmUniversalMode mode; + + ThreadblockSwizzle block_mapping; + + void *barrier_workspace; + void *partials_workspace; + + typename EpilogueOutputOp::Params output_op; + + void * ptr_C1; + void * ptr_C2; + void * ptr_D; + void * ptr_Tensor; + void * ptr_Vector; + + typename Epilogue::OutputTileIterator::Params params_C1; + typename Epilogue::OutputTileIterator::Params params_C2; + typename Epilogue::OutputTileIterator::Params params_D; + typename Epilogue::TensorTileIterator::Params params_Tensor; + + int64_t batch_stride_C1; + int64_t batch_stride_C2; + int64_t batch_stride_D; + int64_t batch_stride_Vector; + int64_t batch_stride_Tensor; + + typename LayoutC::Stride::Index ldr; + + protected: + + // + // Host-only dispatch-utilities + // + + /// Pad the given allocation size up to the nearest cache line + static size_t cacheline_align_up(size_t size) + { + static const int CACHELINE_SIZE = 128; + return (size + CACHELINE_SIZE - 1) / CACHELINE_SIZE * CACHELINE_SIZE; + } + + /// Get the workspace size needed for barrier + size_t get_barrier_workspace_size() const + { + // For atomic reduction, each SK-block needs a synchronization flag. 
For parallel reduction, + // each reduction block needs its own synchronization flag. + int sk_blocks = block_mapping.sk_regions() * block_mapping.sk_blocks_per_region(); + int num_flags = fast_max(sk_blocks, block_mapping.reduction_blocks); + + return cacheline_align_up(sizeof(typename Barrier::T) * num_flags); + } + + /// Get the workspace size needed for intermediate partial sums + size_t get_partials_workspace_size() const + { + int sk_blocks = block_mapping.sk_regions() * block_mapping.sk_blocks_per_region(); + return cacheline_align_up(kWorkspaceBytesPerBlock * sk_blocks); + } + + + public: + + // + // Host dispatch API + // + + /// Default constructor + Params() = default; + + /// Constructor + Params( + Arguments const &args, /// GEMM application arguments + int device_sms, /// Number of SMs on the device + int sm_occupancy) /// Kernel SM occupancy (in thread blocks) + : + params_A(args.lda), + params_B(args.ldb), + params_C1(args.ldc1), + params_C2(args.ldc2), + params_D(args.ldd), + params_Tensor(args.ldt), + output_op(args.epilogue), + mode(args.mode), + ptr_A(const_cast(args.ptr_A)), + ptr_B(const_cast(args.ptr_B)), + ptr_C1(const_cast(args.ptr_C1)), + ptr_C2(const_cast(args.ptr_C2)), + ptr_D(args.ptr_D), + ptr_Vector(args.ptr_Vector), + ldr(args.ldr), + ptr_Tensor(args.ptr_Tensor), + batch_stride_A(args.batch_stride_A), + batch_stride_B(args.batch_stride_B), + batch_stride_C1(args.batch_stride_C1), + batch_stride_C2(args.batch_stride_C2), + batch_stride_D(args.batch_stride_D), + batch_stride_Vector(args.batch_stride_Vector), + batch_stride_Tensor(args.batch_stride_Tensor), + barrier_workspace(nullptr), + partials_workspace(nullptr) + { + CUTLASS_TRACE_HOST("GemmStreamkWithFusedEpilogue::Params::Params() - problem_size: " << problem_size); + CUTLASS_TRACE_HOST(" ptr_Vector: " << (void *)this->ptr_Vector); + CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor); + CUTLASS_TRACE_HOST(" ldr: " << this->ldr); + CUTLASS_TRACE_HOST(" ldt: " << args.ldt); + CUTLASS_TRACE_HOST(" avail_sms: " << avail_sms); + + // Number of SMs to make available for StreamK decomposition + int avail_sms = (args.avail_sms == -1) ? + device_sms : + fast_min(args.avail_sms, device_sms); + + // Initialize the block mapping structure + block_mapping = ThreadblockSwizzle( + typename ThreadblockSwizzle::template KernelTraits(), + args.mode, + args.problem_size, + {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK}, + args.batch_count, + sm_occupancy, + device_sms, + avail_sms); + } + + /// Returns the workspace size (in bytes) needed for these parameters + size_t get_workspace_size() const + { + return + get_barrier_workspace_size() + + get_partials_workspace_size(); + } + + /// Assign and initialize the specified workspace buffer. Assumes + /// the memory allocated to workspace is at least as large as get_workspace_size(). 
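// A worked sizing example (illustrative numbers, assumed rather than taken from the source):
// suppose the block mapping yields sk_regions() * sk_blocks_per_region() == 8 stream-K blocks,
// reduction_blocks == 0, sizeof(typename Barrier::T) == 4, and kWorkspaceBytesPerBlock == 16384.
// Then, with the 128-byte cacheline_align_up above:
//
//   num_flags                = fast_max(8, 0)             = 8
//   barrier workspace bytes  = align_up(4 * 8, 128)       = 128
//   partials workspace bytes = align_up(16384 * 8, 128)   = 131072
//   get_workspace_size()     = 131072 + 128               = 131200
//
// init_workspace() below carves one allocation of that size into the partials region followed
// by the barrier region, and zero-fills only the barrier flags.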
+  Status init_workspace(
+    void *workspace,
+    cudaStream_t stream = nullptr)
+  {
+    uint8_t *ptr = static_cast<uint8_t *>(workspace);
+
+    // Establish partials workspace
+    partials_workspace = nullptr;
+    size_t partials_workspace_bytes = get_partials_workspace_size();
+    if (partials_workspace_bytes > 0)
+    {
+      if (!workspace) {
+        return Status::kErrorWorkspaceNull;
+      }
+      partials_workspace = ptr;
+      ptr += partials_workspace_bytes;
+    }
+
+    // Establish barrier workspace
+    barrier_workspace = nullptr;
+    size_t barrier_workspace_bytes = get_barrier_workspace_size();
+    if (barrier_workspace_bytes > 0)
+    {
+      if (!workspace) {
+        return Status::kErrorWorkspaceNull;
+      }
+      barrier_workspace = ptr;
+      ptr += barrier_workspace_bytes;
+    }
+
+    // Zero-initialize barrier workspace
+    if (barrier_workspace)
+    {
+      size_t barrier_workspace_bytes = get_barrier_workspace_size();
+
+      CUTLASS_TRACE_HOST("  Initialize " << barrier_workspace_bytes << " barrier bytes");
+
+      cudaError_t result = cudaMemsetAsync(
+        barrier_workspace,
+        0,
+        barrier_workspace_bytes,
+        stream);
+
+      if (result != cudaSuccess) {
+        CUTLASS_TRACE_HOST("  cudaMemsetAsync() returned error " << cudaGetErrorString(result));
+        return Status::kErrorInternal;
+      }
+    }
+
+    return Status::kSuccess;
+  }
+
+
+  /// Returns the GEMM volume in thread block tiles
+  cutlass::gemm::GemmCoord get_tiled_shape() const
+  {
+    return block_mapping.tiled_shape();
+  }
+
+  /// Returns the total number of thread blocks to launch
+  int get_grid_blocks() const
+  {
+    dim3 grid_dims = get_grid_dims();
+    return grid_dims.x * grid_dims.y * grid_dims.z;
+  }
+
+  /// Returns the grid extents in thread blocks to launch
+  dim3 get_grid_dims() const
+  {
+    return block_mapping.get_grid_dims();
+  }
+
+  /// Lightweight update given a subset of arguments. Problem geometry is assumed
+  /// to remain the same.
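// A minimal host-side sketch of how this Params API is typically driven (assumed usage, not
// part of this header; `GemmKernel` stands for a concrete GemmStreamkWithFusedEpilogue
// instantiation and `args` for a populated Arguments structure):
//
//   typename GemmKernel::Params params(args, device_sms, sm_occupancy);
//
//   void *workspace = nullptr;
//   size_t workspace_bytes = params.get_workspace_size();     // barrier flags + partial sums
//   if (workspace_bytes) { cudaMalloc(&workspace, workspace_bytes); }
//
//   if (params.init_workspace(workspace, stream) != Status::kSuccess) { /* handle error */ }
//
//   dim3 grid = params.get_grid_dims();                       // from the stream-K block mapping
//   dim3 block(GemmKernel::kThreadCount, 1, 1);
//   // launch with <<<grid, block, sizeof(typename GemmKernel::SharedStorage), stream>>>
//
// For subsequent launches in which only pointers or strides change, update() below refreshes
// the Params in place without recomputing the block mapping.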
+ CUTLASS_HOST_DEVICE + void update(Arguments const &args) + { + ptr_A = const_cast(args.ptr_A); + ptr_B = const_cast(args.ptr_B); + ptr_C1 = const_cast(args.ptr_C1); + ptr_C2 = const_cast(args.ptr_C2); + ptr_D = args.ptr_D; + + ptr_Vector = args.ptr_Vector; + ldr = args.ldr; + ptr_Tensor = args.ptr_Tensor; + + batch_stride_A = args.batch_stride_A; + batch_stride_B = args.batch_stride_B; + batch_stride_C1 = args.batch_stride_C1; + batch_stride_C2 = args.batch_stride_C2; + batch_stride_D = args.batch_stride_D; + batch_stride_Vector = args.batch_stride_Vector; + batch_stride_Tensor = args.batch_stride_Tensor; + + output_op = args.epilogue; + + CUTLASS_TRACE_HOST("GemmStreamkWithFusedEpilogue::Params::update()"); + CUTLASS_TRACE_HOST(" ptr_Vector: " << (void *)this->ptr_Vector); + CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor); + CUTLASS_TRACE_HOST(" ldr: " << this->ldr); + } + }; + + /// Tile work descriptor + struct TileWorkDesc + { + /// The linear tile index + int tile_idx; + + /// The location of this tile (in threadblock-tile coordinates) in the output matrix + cutlass::gemm::GemmCoord tiled_coord; + + // The first global-scoped MAC-iteration this threadblock will perform for this tile + int iter_begin; + + // The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile + int k_begin; + + // The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile + int k_end; + + /// The number of remaining MAC-iterations this threadblock will perform for this tile + int k_iters_remaining; + + // Whether this block will perform the first iteration of this tile + CUTLASS_DEVICE + bool tile_started() + { + return (k_begin == 0); + } + + // Whether this block will perform the last iteration of this tile + CUTLASS_DEVICE + bool tile_finished(Params const ¶ms) + { + return (k_end == params.block_mapping.problem_size.k()); + } + }; + + + /// Shared memory storage structure + union SharedStorage { + typename Mma::SharedStorage main_loop; + typename Epilogue::SharedStorage epilogue; + }; + + +protected: + + // + // Data members + // + + /// GEMM problem parameters + Params const ¶ms; + + /// Shared storage reference + SharedStorage &shared_storage; + + /// ID within the threadblock + int thread_idx; + + /// ID of warp + int warp_idx; + + /// ID of each thread within a warp + int lane_idx; + + /// Threadblock scoped epilogue + Epilogue epilogue; + + +public: + + // + // Host dispatch API + // + + /// Determines whether kernel satisfies alignment + static Status can_implement( + cutlass::gemm::GemmCoord const & problem_size) { + + CUTLASS_TRACE_HOST("GemmStreamkWithFusedEpilogue::can_implement()"); + + static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + + bool isAMisaligned = false; + bool isBMisaligned = false; + bool isCMisaligned = false; + + if (platform::is_same::value) { + isAMisaligned = problem_size.k() % kAlignmentA; + } else if (platform::is_same::value) { + isAMisaligned = problem_size.m() % kAlignmentA; + } else if (platform::is_same>::value + || platform::is_same>::value) { + isAMisaligned = problem_size.k() % kAlignmentA; + } + + if (platform::is_same::value) { + isBMisaligned = problem_size.n() % kAlignmentB; + } else if (platform::is_same::value) { + isBMisaligned = problem_size.k() % kAlignmentB; + } else if 
(platform::is_same>::value + || platform::is_same>::value) { + isBMisaligned = problem_size.k() % kAlignmentB; + } + + if (platform::is_same::value) { + isCMisaligned = problem_size.n() % kAlignmentC; + } else if (platform::is_same::value) { + isCMisaligned = problem_size.m() % kAlignmentC; + } else if (platform::is_same>::value + || platform::is_same>::value) { + isCMisaligned = problem_size.n() % kAlignmentC; + } + + if (isAMisaligned) { + CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for A operand"); + return Status::kErrorMisalignedOperand; + } + + if (isBMisaligned) { + CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for B operand"); + return Status::kErrorMisalignedOperand; + } + + if (isCMisaligned) { + CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for C operand"); + return Status::kErrorMisalignedOperand; + } + + CUTLASS_TRACE_HOST(" returning kSuccess"); + + return Status::kSuccess; + } + + static Status can_implement(Arguments const &args) { + return can_implement(args.problem_size); + } + +protected: + + // + // Device-only utility methods + // + + /// Iterator for fetching tile fragments from A + CUTLASS_DEVICE + typename Mma::IteratorA init_iterator_A( + TileWorkDesc &tile_work, + GemmUniversalMode mode) + { + // The input A matrix + ElementA *ptr_A = static_cast(params.ptr_A); + + // Update input pointers based on batched/array mode + if (mode == GemmUniversalMode::kBatched) { + ptr_A += tile_work.tiled_coord.k() * params.batch_stride_A; + } + if (mode == GemmUniversalMode::kArray) { + ptr_A = static_cast(params.ptr_A)[tile_work.tiled_coord.k()]; + } + + int m_begin = tile_work.tiled_coord.m() * Mma::Shape::kM; + int m_end = params.block_mapping.problem_size.m(); + return Mma::IteratorA( + params.params_A, + ptr_A, + { m_end, tile_work.k_end }, + threadIdx.x, + { m_begin, tile_work.k_begin }); + + } + + + /// Iterator for fetching tile fragments from B + CUTLASS_DEVICE + typename Mma::IteratorB init_iterator_B( + TileWorkDesc &tile_work, + GemmUniversalMode mode) + { + // The input B matrix + ElementB *ptr_B = static_cast(params.ptr_B); + + // Update input pointers based on batched/array mode + if (mode == GemmUniversalMode::kBatched) { + ptr_B += tile_work.tiled_coord.k() * params.batch_stride_B; + } + if (mode == GemmUniversalMode::kArray) { + ptr_B = static_cast(params.ptr_B)[tile_work.tiled_coord.k()]; + } + + int n_begin = tile_work.tiled_coord.n() * Mma::Shape::kN; + int n_end = params.block_mapping.problem_size.n(); + return Mma::IteratorB( + params.params_B, + ptr_B, + { tile_work.k_end, n_end }, + threadIdx.x, + { tile_work.k_begin, n_begin }); + } + + + CUTLASS_DEVICE + void init_dp_tile_work( + TileWorkDesc &tile_work, + int tile_idx) + { + // The linear tile index + tile_work.tile_idx = tile_idx; + + // The first global-scoped MAC-iteration this threadblock will perform for this tile + tile_work.iter_begin = tile_idx * params.block_mapping.iters_per_tile(); + + // The number of MAC-iterations this threadblock will perform for this tile + tile_work.k_iters_remaining = params.block_mapping.iters_per_tile(); + + // The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile + tile_work.k_begin = 0; + + // The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile + tile_work.k_end = params.block_mapping.problem_size.k(); + + // The location of this tile (in threadblock-tile coordinates) in the output matrix + tile_work.tiled_coord = 
params.block_mapping.get_tile_offset(tile_work.tile_idx); + } + + + CUTLASS_DEVICE + void init_sk_tile_work( + TileWorkDesc &tile_work, + int tile_idx, + int block_iter_begin, + int block_iter_end) + { + // The linear tile index + tile_work.tile_idx = tile_idx; + + // The first global-scoped MAC-iteration for this tile + int tile_iter_begin = tile_idx * params.block_mapping.iters_per_tile(); + + // The first global-scoped MAC-iteration this threadblock will perform for this tile + tile_work.iter_begin = max(block_iter_begin, tile_iter_begin); + + // The first tile-scoped MAC-iteration this threadblock will perform for this tile + int k_iter_begin = tile_work.iter_begin - tile_iter_begin; + + // The last (one past) tile-scoped MAC-iteration this threadblock will perform for this tile + int k_iter_end = block_iter_end - tile_iter_begin; + + // The number of MAC-iterations this threadblock will perform for this tile + tile_work.k_iters_remaining = k_iter_end - k_iter_begin; + + // The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile + tile_work.k_begin = k_iter_begin * Mma::Shape::kK; + + // The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile + tile_work.k_end = min( + params.block_mapping.problem_size.k(), // extent of k domain + (k_iter_end * Mma::Shape::kK)); // extent of the threadblock's global iteration assignment + + // The location of this tile (in threadblock-tile coordinates) in the output matrix + tile_work.tiled_coord = params.block_mapping.get_tile_offset(tile_work.tile_idx); + } + + + /// Share accumulators with peers + CUTLASS_DEVICE + void share_accumulators( + AccumulatorTile const &accumulator_tile, + int block_idx, + int first_block_idx) + { + AccumulatorTile *accum_tile_workspace = reinterpret_cast(params.partials_workspace); + + int accum_tile_offset = first_block_idx * kThreadCount; + + if (block_idx == first_block_idx) + { + // First peer initializes the workspace partials + BlockStripedReduceT::store(accum_tile_workspace + accum_tile_offset, accumulator_tile, thread_idx); + } + else + { + // Subsequent peers atomically accumulate into the workspace partials + if (ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kAtomic) + { + // Non-deterministic reduction order: wait for the first peer to have initialized the partials before we add to them + Barrier::wait_lt(params.barrier_workspace, thread_idx, first_block_idx, 1); + } + else + { + // Turnstile reduction order: wait until the previous peer has written + int wait_count = block_idx - first_block_idx; + Barrier::wait_eq(params.barrier_workspace, thread_idx, first_block_idx, wait_count); + } + + // Perform reduction in workspace + BlockStripedReduceT::reduce(accum_tile_workspace + accum_tile_offset, accumulator_tile, thread_idx); + } + + // Signal our arrival + Barrier::arrive_inc(params.barrier_workspace, thread_idx, first_block_idx); + } + + + /// Acquire accumulators from peers + CUTLASS_DEVICE + void acquire_accumulators( + AccumulatorTile &accumulator_tile, + int block_idx, + int first_block_idx) + { + AccumulatorTile *accum_tile_workspace = reinterpret_cast(params.partials_workspace); + + // Wait for arrival + int num_carry_in = block_idx - first_block_idx; + Barrier::wait_eq_reset(params.barrier_workspace, thread_idx, first_block_idx, num_carry_in); + + // Load and add peer-partials accumulator tile to local accumulator tile + int accum_tile_offset = first_block_idx * kThreadCount; + 
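    // Putting the two halves of the handshake together (illustrative scenario, not from the
    // source): if SK blocks 5..7 cooperate on one output tile, first_block_idx == 5.  Blocks 5
    // and 6 do not finish the tile, so they store/reduce their striped partials into the
    // workspace at offset first_block_idx * kThreadCount via share_accumulators() and then
    // arrive at the barrier.  Block 7 finishes the tile but did not start it, so it reaches
    // this point, waits for num_carry_in == 7 - 5 == 2 arrivals, and the load_add() below folds
    // the shared partials into its local accumulators before it runs do_epilogue().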
BlockStripedReduceT::load_add(accumulator_tile, accum_tile_workspace + accum_tile_offset, thread_idx); + } + + + /// Perform epilogue computations and output + CUTLASS_DEVICE + void do_epilogue( + TileWorkDesc &tile_work, + AccumulatorTile &accumulator_tile) + { + ElementC *ptr_C1 = static_cast(params.ptr_C1); + ElementC *ptr_C2 = static_cast(params.ptr_C2); + ElementC *ptr_D = static_cast(params.ptr_D); + typename Epilogue::ElementTensor *ptr_Tensor = static_cast(params.ptr_Tensor); + + // Define the reduction output pointer and move to the appropriate place + typename Epilogue::ElementVector *ptr_Vector = + static_cast(params.ptr_Vector); + + // Update pointers for batched/array mode(s) + if (params.mode == GemmUniversalMode::kBatched) { + ptr_C1 += tile_work.tiled_coord.k() * params.batch_stride_C1; + if (ptr_C2) { + ptr_C2 += tile_work.tiled_coord.k() * params.batch_stride_C2; + } + ptr_D += tile_work.tiled_coord.k() * params.batch_stride_D; + if (ptr_Tensor) { + ptr_Tensor += tile_work.tiled_coord.k() * params.batch_stride_Tensor; + } + if (ptr_Vector) { + ptr_Vector += tile_work.tiled_coord.k() * params.batch_stride_Vector; + } + } + if (params.mode == GemmUniversalMode::kArray) { + ptr_C1 = static_cast(params.ptr_C1)[tile_work.tiled_coord.k()]; + if (ptr_C2) { + ptr_C2 = static_cast(params.ptr_C2)[tile_work.tiled_coord.k()]; + } + ptr_D = static_cast(params.ptr_D)[tile_work.tiled_coord.k()]; + if (ptr_Tensor) { + ptr_Tensor = static_cast(params.ptr_Tensor)[tile_work.tiled_coord.k()]; + } + if (ptr_Vector) { + ptr_Vector = static_cast(params.ptr_Vector)[tile_work.tiled_coord.k()]; + } + } + + // Location of this tile in item-coords + MatrixCoord threadblock_item_begin( + tile_work.tiled_coord.m() * Mma::Shape::kM, + tile_work.tiled_coord.n() * Mma::Shape::kN + ); + + // Tile iterator loading from residual1. + typename Epilogue::OutputTileIterator iterator_C1( + params.params_C1, + ptr_C1, + params.block_mapping.problem_size.mn(), + thread_idx, + threadblock_item_begin); + + // Tile iterator loading from residual2. + typename Epilogue::OutputTileIterator iterator_C2( + params.params_C2, + ptr_C2, + params.block_mapping.problem_size.mn(), + thread_idx, + threadblock_item_begin); + + // Tile iterator writing to destination tensor. + typename Epilogue::OutputTileIterator iterator_D( + params.params_D, + ptr_D, + params.block_mapping.problem_size.mn(), + thread_idx, + threadblock_item_begin); + + // Additional tensor to load from + typename Epilogue::TensorTileIterator tensor_iterator( + params.params_Tensor, + ptr_Tensor, + params.block_mapping.problem_size.mn(), + thread_idx, + threadblock_item_begin); + + // Move to appropriate location for this output tile + if (ptr_Vector) { + ptr_Vector += threadblock_item_begin.column() + tile_work.tiled_coord.m() * params.ldr; + } + + // Execute the epilogue operator to update the destination tensor. 
+ epilogue( + EpilogueOutputOp(params.output_op), + ptr_Vector, + iterator_D, + accumulator_tile, + iterator_C1, + iterator_C2, + tensor_iterator, + params.block_mapping.problem_size.mn(), + threadblock_item_begin); + } + + + CUTLASS_DEVICE + void separate_reduction(int reduce_idx) + { + int peer_idx_begin, peer_idx_last, reduce_tile_idx, reduce_fragment_idx; + + // Reduce by sk-tile (every tile contributed to by one or more blocks) + reduce_tile_idx = reduce_idx / Epilogue::kAccumulatorFragments; + reduce_fragment_idx = reduce_idx % Epilogue::kAccumulatorFragments; + + int iter_tile_first = reduce_tile_idx * params.block_mapping.iters_per_tile(); + int iter_tile_last = iter_tile_first + params.block_mapping.iters_per_tile() - 1; + + peer_idx_begin = params.block_mapping.get_sk_block_idx(iter_tile_first); + peer_idx_last = params.block_mapping.get_sk_block_idx(iter_tile_last); + + // Wait for peers to complete + int peer_idx_end = peer_idx_last + 1; + int num_peers = peer_idx_end - peer_idx_begin; + Barrier::wait_eq_reset( + params.barrier_workspace, + thread_idx, + (reduce_tile_idx * Epilogue::kAccumulatorFragments) + reduce_fragment_idx, + num_peers); + + /// The location of this tile (in threadblock-tile coordinates) in the output matrix + GemmCoord tiled_coord = params.block_mapping.get_tile_offset(reduce_tile_idx); + + // Location of this tile in item-coords + MatrixCoord threadblock_item_begin( + tiled_coord.m() * Mma::Shape::kM, + tiled_coord.n() * Mma::Shape::kN + ); + + ElementC *ptr_C1 = static_cast(params.ptr_C1); + ElementC *ptr_C2 = static_cast(params.ptr_C2); + ElementC *ptr_D = static_cast(params.ptr_D); + typename Epilogue::ElementTensor *ptr_Tensor = static_cast(params.ptr_Tensor); + + // Define the reduction output pointer and move to the appropriate place + typename Epilogue::ElementVector *ptr_Vector = + static_cast(params.ptr_Vector); + + // Tile iterator loading from residual1. + typename Epilogue::OutputTileIterator iterator_C1( + params.params_C1, + ptr_C1, + params.block_mapping.problem_size.mn(), + thread_idx, + threadblock_item_begin); + + // Tile iterator loading from residual2. + typename Epilogue::OutputTileIterator iterator_C2( + params.params_C2, + ptr_C2, + params.block_mapping.problem_size.mn(), + thread_idx, + threadblock_item_begin); + + // Tile iterator writing to destination tensor. + typename Epilogue::OutputTileIterator iterator_D( + params.params_D, + ptr_D, + params.block_mapping.problem_size.mn(), + thread_idx, + threadblock_item_begin); + + // Additional tensor to load from + typename Epilogue::TensorTileIterator tensor_iterator( + params.params_Tensor, + ptr_Tensor, + params.block_mapping.problem_size.mn(), + thread_idx, + threadblock_item_begin); + + // Move to appropriate location for this output tile + if (ptr_Vector) { + ptr_Vector += threadblock_item_begin.column() + tiled_coord.m() * params.ldr; + } + + // Execute the epilogue operator to update the destination tensor. 
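    // In the kMixed strategy, each dedicated reduction block handles one accumulator fragment
    // of one stream-K tile.  As a small index example (hypothetical values): with
    // Epilogue::kAccumulatorFragments == 4, reduce_idx == 10 was split above into
    // reduce_tile_idx == 2 and reduce_fragment_idx == 2, and the peer range covers the SK
    // blocks that produced iterations of tile 2.  The reduce() call below accumulates that
    // fragment across peers [peer_idx_begin, peer_idx_end) out of the partials workspace and
    // then applies the fused epilogue once for this tile fragment.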
+ epilogue.reduce( + peer_idx_begin, + peer_idx_end, + reduce_fragment_idx, + params.partials_workspace, + EpilogueOutputOp(params.output_op), + ptr_Vector, + iterator_D, + iterator_C1, + iterator_C2, + tensor_iterator, + params.block_mapping.problem_size.mn(), + threadblock_item_begin); + } + + + CUTLASS_DEVICE + void process_tile( + TileWorkDesc tile_work, + int block_idx, + int dp_start_block_idx, + int block_iter_begin) + { + // Initialize input iterators + typename Mma::IteratorA iterator_A = init_iterator_A(tile_work, params.mode); + typename Mma::IteratorB iterator_B = init_iterator_B(tile_work, params.mode); + + // Initialize accumulators + AccumulatorTile accumulator_tile; + accumulator_tile.clear(); + + // Initialize MMA abstraction + Mma mma( + shared_storage.main_loop, + thread_idx, + warp_idx, + lane_idx); + + // Perform this tile's range of multiply-accumulate (MAC) iterations + mma(tile_work.k_iters_remaining, accumulator_tile, iterator_A, iterator_B, accumulator_tile); + + if ((ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kAtomic) || + (params.block_mapping.reduction_blocks == 0) || + (block_idx >= dp_start_block_idx)) + { + // + // Cooperative SK peer reduction or DP block + // + + int first_block_idx = params.block_mapping.get_first_block_idx(tile_work.tile_idx, block_idx); + + if (!tile_work.tile_finished(params)) { + // Non "finishing" SK blocks must share their partial accumulator sums through global scratch workspace + share_accumulators(accumulator_tile, block_idx, first_block_idx); + } + else + { + // DP blocks and "finishing" SK blocks must perform epilogue operations and write the output tile + if (!tile_work.tile_started()) + { + // A "finishing" SK block must first aggregate its accumulator partial sums with those shared by peer threadblocks + acquire_accumulators(accumulator_tile, block_idx, first_block_idx); + } + + do_epilogue(tile_work, accumulator_tile); + } + } + else + { + // + // Separate peer reduction + // + + // Share accumulator partial sums with peer threadblock(s) through scratch workspace + epilogue.share(block_idx, params.partials_workspace, accumulator_tile, tile_work.tile_started()); + + // Signal arrival + Barrier::arrive_range_inc( + params.barrier_workspace, + thread_idx, + tile_work.tile_idx * Epilogue::kAccumulatorFragments, + Epilogue::kAccumulatorFragments); + } + } + + + /// Executes one GEMM + CUTLASS_DEVICE + void gemm() + { + // Initialize block's iteration range + int tile_idx = 0; + int block_iter_begin = 0; + int block_iters_remaining = 0; + + int block_idx = params.block_mapping.get_block_idx(); + + int sk_padding_start_block_idx = params.block_mapping.sk_regions() * params.block_mapping.sk_blocks_per_region(); + int dp_start_block_idx = params.block_mapping.sk_waves * params.block_mapping.avail_sms; + int reduce_start_block_idx = dp_start_block_idx + params.block_mapping.dp_blocks; + int grid_padding_start_block_idx = reduce_start_block_idx + params.block_mapping.reduction_blocks; + + // Initialize tile work descriptor + TileWorkDesc tile_work; + + bool dp_block = (block_idx >= dp_start_block_idx) && (block_idx < reduce_start_block_idx); + bool sk_block = (block_idx < sk_padding_start_block_idx); + bool reduce_block = (block_idx >= reduce_start_block_idx) && + (block_idx < grid_padding_start_block_idx) && + (ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kMixed); + + if (dp_block) + { + // This is a DP block + int dp_block_idx = block_idx - dp_start_block_idx; + int first_dp_tile = 
(params.block_mapping.cohort_raster) ? 0 : params.block_mapping.sk_tiles; + + // Blocks in first DP wave get configured number of tiles + tile_idx = first_dp_tile + dp_block_idx; + int tile_allottment = params.block_mapping.dp_first_wave_tiles; + + // Blocks in subsequent DP waves get 1 tile + if (dp_block_idx >= params.block_mapping.avail_sms) { + tile_allottment = 1; + tile_idx += (params.block_mapping.dp_first_wave_tiles - 1) * params.block_mapping.avail_sms; + } + + block_iters_remaining = params.block_mapping.iters_per_tile() * tile_allottment; + + init_dp_tile_work(tile_work, tile_idx); + + // DP blocks exit if out of bounds or overlap an SK tile (only possible during cohort rasterization, where dp_first_wave_tiles must be 1) + if ((tile_idx < params.block_mapping.sk_tiles) || + (tile_work.tiled_coord.m() >= params.block_mapping.tiled_shape().m()) || + (tile_work.tiled_coord.n() >= params.block_mapping.tiled_shape().n())) + { + return; + } + } + else if (sk_block) + { + // This is a SK block + int block_iter_end; + params.block_mapping.get_iter_extents(block_idx, block_iter_begin, block_iter_end); + block_iters_remaining = block_iter_end - block_iter_begin; + + tile_idx = params.block_mapping.get_sk_tile_idx(block_iter_end - 1); + init_sk_tile_work(tile_work, tile_idx, block_iter_begin, block_iter_begin + block_iters_remaining); + } + else + { + if (reduce_block) + { + // This is a reduction threadblock + int reduce_block_idx = block_idx - reduce_start_block_idx; + separate_reduction(reduce_block_idx); + } + + return; + } + + // Iteration-processing loop body + CUTLASS_PRAGMA_NO_UNROLL + while (true) + { + // Perform this block's share of work for this tile + process_tile( + tile_work, + block_idx, + dp_start_block_idx, + block_iter_begin); + + block_iters_remaining -= tile_work.k_iters_remaining; + + if (block_iters_remaining == 0) + { + break; + } + + // Continue to next tile + __syncthreads(); + + if (block_idx >= dp_start_block_idx) + { + // DP block consume their tiles at stride + tile_idx += params.block_mapping.avail_sms; + init_dp_tile_work(tile_work, tile_idx); + } + else + { + // SK blocks consume their tiles in backwards order + tile_idx--; + init_sk_tile_work(tile_work, tile_idx, block_iter_begin, block_iter_begin + block_iters_remaining); + } + } + + } + + +public: + + // + // Device-only API + // + + // Factory invocation + CUTLASS_DEVICE + static void invoke( + Params const ¶ms, + SharedStorage &shared_storage) + { + GemmStreamkWithFusedEpilogue op(params, shared_storage); + op(); + } + + + // Constructor + CUTLASS_DEVICE + GemmStreamkWithFusedEpilogue( + Params const ¶ms, + SharedStorage &shared_storage) + : + params(params), + shared_storage(shared_storage), + thread_idx(threadIdx.x), + warp_idx(__shfl_sync(0xffffffff, threadIdx.x / 32, 0)), // broadcast the warp_id computed by lane 0 to ensure dependent code + lane_idx(threadIdx.x % 32), + epilogue( + shared_storage.epilogue, + thread_idx, + warp_idx, + lane_idx) + {} + + /// Executes one GEMM + CUTLASS_DEVICE + void operator()() { + // Generic SK code path + gemm(); + + } +}; + + +// GemmStreamkWithFusedEpilogue with one source +template < + typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate + typename Epilogue_, ///! Epilogue + typename ThreadblockSwizzle_ ///! 
Threadblock swizzling function +> +struct GemmStreamkWithFusedEpilogue { + using Mma = Mma_; + using Epilogue = Epilogue_; + using EpilogueOutputOp = typename Epilogue::OutputOp; + using ThreadblockSwizzle = ThreadblockSwizzle_; + + using ElementA = typename Mma::IteratorA::Element; + using LayoutA = typename Mma::IteratorA::Layout; + using ElementB = typename Mma::IteratorB::Element; + using LayoutB = typename Mma::IteratorB::Layout; + using ElementC = typename Epilogue::OutputTileIterator::Element; + using LayoutC = typename Epilogue::OutputTileIterator::Layout; + + /// The per-thread tile of raw accumulators + using AccumulatorTile = typename Mma::FragmentC; + + static ComplexTransform const kTransformA = Mma::kTransformA; + static ComplexTransform const kTransformB = Mma::kTransformB; + using Operator = typename Mma::Operator; + + using OperatorClass = typename Mma::Operator::OperatorClass; + using ThreadblockShape = typename Mma::Shape; + using WarpShape = typename Mma::Operator::Shape; + using InstructionShape = typename Mma::Policy::Operator::InstructionShape; + using ArchTag = typename Mma::ArchTag; + + static int const kStages = Mma::kStages; + static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + + /// Warp count (concept: GemmShape) + using WarpCount = typename Mma::WarpCount; + static int const kThreadCount = 32 * WarpCount::kCount; + + /// Workspace bytes per thread block + static size_t const kWorkspaceBytesPerBlock = + __NV_STD_MAX( + kThreadCount * sizeof(AccumulatorTile), + Epilogue::kWorkspaceBytesPerBlock); + + /// Block-striped reduction utility + using BlockStripedReduceT = BlockStripedReduce; + + + + // + // Structures + // + + /// Argument structure + struct Arguments + { + + // + // Data members + // + + GemmUniversalMode mode; + GemmCoord problem_size; + int batch_count; // Either (mode == GemmUniversalMode::kBatched) the batch count, or (mode == GemmUniversalMode::kGemm) the tile-splitting factor + + typename EpilogueOutputOp::Params epilogue; + + void const * ptr_A; + void const * ptr_B; + void const * ptr_C; + void * ptr_D; + + void * ptr_Vector; + void * ptr_Tensor; + + int64_t batch_stride_A; + int64_t batch_stride_B; + int64_t batch_stride_C; + int64_t batch_stride_D; + int64_t batch_stride_Vector; + int64_t batch_stride_Tensor; + + typename LayoutA::Stride::Index lda; + typename LayoutB::Stride::Index ldb; + typename LayoutC::Stride::Index ldc; + typename LayoutC::Stride::Index ldd; + typename LayoutC::Stride::Index ldr; + typename LayoutC::Stride::Index ldt; + + int avail_sms; /// The number of SMs that StreamK dispatch heuristics will attempt to load-balance across (-1 defaults to device width, 1 implies classic data-parallel scheduling) + + + // + // Methods + // + + /// Default Constructor + Arguments(): + mode(GemmUniversalMode::kGemm), + batch_count(1), + ptr_A(nullptr), + ptr_B(nullptr), + ptr_C(nullptr), + ptr_D(nullptr), + avail_sms(-1) + {} + + /// constructs an arguments structure + Arguments( + GemmUniversalMode mode, + GemmCoord problem_size, + int batch_split, /// Either (mode == GemmUniversalMode::kBatched) the batch count, or (mode == GemmUniversalMode::kGemm) the tile-splitting factor (1 defaults to StreamK, >1 emulates Split-K) + typename EpilogueOutputOp::Params epilogue, + void const * ptr_A, + void const * ptr_B, + void const * ptr_C, + void * ptr_D, + void * 
ptr_Vector, + void * ptr_Tensor, + int64_t batch_stride_A, + int64_t batch_stride_B, + int64_t batch_stride_C, + int64_t batch_stride_D, + int64_t batch_stride_Vector, + int64_t batch_stride_Tensor, + typename LayoutA::Stride::Index lda, + typename LayoutB::Stride::Index ldb, + typename LayoutC::Stride::Index ldc, + typename LayoutC::Stride::Index ldd, + typename LayoutC::Stride::Index ldr, + typename LayoutC::Stride::Index ldt, + int avail_sms = -1) /// The number of SMs that StreamK dispatch heuristics will attempt to load-balance across (-1 defaults to device width, 1 implies classic data-parallel scheduling) + : + mode(mode), + problem_size(problem_size), + batch_count(batch_split), + epilogue(epilogue), + ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D), + ptr_Vector(ptr_Vector), + ptr_Tensor(ptr_Tensor), + batch_stride_A(batch_stride_A), + batch_stride_B(batch_stride_B), + batch_stride_C(batch_stride_C), + batch_stride_Vector(batch_stride_Vector), + batch_stride_Tensor(batch_stride_Tensor), + lda(lda), ldb(ldb), ldc(ldc), ldd(ldd), ldr(ldr), ldt(ldt), avail_sms(avail_sms) + { + CUTLASS_TRACE_HOST("GemmStreamkWithFusedEpilogue::Arguments::Arguments() - problem_size: " << problem_size); + CUTLASS_TRACE_HOST(" ptr_Vector: " << (void *)this->ptr_Vector); + CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor); + CUTLASS_TRACE_HOST(" ldr: " << this->ldr); + CUTLASS_TRACE_HOST(" ldt: " << this->ldt); + CUTLASS_TRACE_HOST(" avail_sms: " << this->avail_sms); + } + + /// Returns arguments for the transposed problem + Arguments transposed_problem() const { + Arguments args(*this); + + std::swap(args.problem_size.m(), args.problem_size.n()); + std::swap(args.ptr_A, args.ptr_B); + std::swap(args.lda, args.ldb); + std::swap(args.batch_stride_A, args.batch_stride_B); + + return args; + } + }; + + + /// Parameters structure + struct Params + { + + public: + + // + // Data members + // + + void * ptr_A; + void * ptr_B; + + typename Mma::IteratorA::Params params_A; + typename Mma::IteratorB::Params params_B; + + int64_t batch_stride_A; + int64_t batch_stride_B; + + GemmUniversalMode mode; + + ThreadblockSwizzle block_mapping; + + void *barrier_workspace; + void *partials_workspace; + + typename EpilogueOutputOp::Params output_op; + + void * ptr_C; + void * ptr_D; + void * ptr_Tensor; + void * ptr_Vector; + + typename Epilogue::OutputTileIterator::Params params_C; + typename Epilogue::OutputTileIterator::Params params_D; + typename Epilogue::TensorTileIterator::Params params_Tensor; + + int64_t batch_stride_C; + int64_t batch_stride_D; + int64_t batch_stride_Vector; + int64_t batch_stride_Tensor; + + + typename LayoutC::Stride::Index ldr; + + protected: + + // + // Host-only dispatch-utilities + // + + /// Pad the given allocation size up to the nearest cache line + static size_t cacheline_align_up(size_t size) + { + static const int CACHELINE_SIZE = 128; + return (size + CACHELINE_SIZE - 1) / CACHELINE_SIZE * CACHELINE_SIZE; + } + + /// Get the workspace size needed for barrier + size_t get_barrier_workspace_size() const + { + // For atomic reduction, each SK-block needs a synchronization flag. For parallel reduction, + // each reduction block needs its own synchronization flag. 
+ int sk_blocks = block_mapping.sk_regions() * block_mapping.sk_blocks_per_region(); + int num_flags = fast_max(sk_blocks, block_mapping.reduction_blocks); + + return cacheline_align_up(sizeof(typename Barrier::T) * num_flags); + } + + /// Get the workspace size needed for intermediate partial sums + size_t get_partials_workspace_size() const + { + int sk_blocks = block_mapping.sk_regions() * block_mapping.sk_blocks_per_region(); + return cacheline_align_up(kWorkspaceBytesPerBlock * sk_blocks); + } + + + public: + // + // Host dispatch API + // + + /// Default constructor + Params() = default; + + /// Constructor + Params( + Arguments const &args, /// GEMM application arguments + int device_sms, /// Number of SMs on the device + int sm_occupancy) /// Kernel SM occupancy (in thread blocks) + : + params_A(args.lda), + params_B(args.ldb), + params_C(args.ldc), + params_D(args.ldd), + params_Tensor(args.ldt), + output_op(args.epilogue), + mode(args.mode), + ptr_A(const_cast(args.ptr_A)), + ptr_B(const_cast(args.ptr_B)), + ptr_C(const_cast(args.ptr_C)), + ptr_D(args.ptr_D), + ptr_Vector(args.ptr_Vector), + ldr(args.ldr), + ptr_Tensor(args.ptr_Tensor), + batch_stride_A(args.batch_stride_A), + batch_stride_B(args.batch_stride_B), + batch_stride_C(args.batch_stride_C), + batch_stride_D(args.batch_stride_D), + batch_stride_Vector(args.batch_stride_Vector), + batch_stride_Tensor(args.batch_stride_Tensor), + barrier_workspace(nullptr), + partials_workspace(nullptr) + { + CUTLASS_TRACE_HOST("GemmStreamkWithFusedEpilogue::Params::Params() - problem_size: " << problem_size); + CUTLASS_TRACE_HOST(" ptr_Vector: " << (void *)this->ptr_Vector); + CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor); + CUTLASS_TRACE_HOST(" ldr: " << this->ldr); + CUTLASS_TRACE_HOST(" ldt: " << args.ldt); + CUTLASS_TRACE_HOST(" avail_sms: " << avail_sms); + + // Number of SMs to make available for StreamK decomposition + int avail_sms = (args.avail_sms == -1) ? + device_sms : + fast_min(args.avail_sms, device_sms); + + // Initialize the block mapping structure + block_mapping = ThreadblockSwizzle( + typename ThreadblockSwizzle::template KernelTraits(), + args.mode, + args.problem_size, + {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK}, + args.batch_count, + sm_occupancy, + device_sms, + avail_sms); + } + + /// Returns the workspace size (in bytes) needed for these parameters + size_t get_workspace_size() const + { + return + get_barrier_workspace_size() + + get_partials_workspace_size(); + } + + + /// Assign and initialize the specified workspace buffer. Assumes + /// the memory allocated to workspace is at least as large as get_workspace_size(). 
+ Status init_workspace( + void *workspace, + cudaStream_t stream = nullptr) + { + uint8_t *ptr = static_cast(workspace); + + // Establish partials workspace + partials_workspace = nullptr; + size_t partials_workspace_bytes = get_partials_workspace_size(); + if (partials_workspace_bytes > 0) + { + if (!workspace) { + return Status::kErrorWorkspaceNull; + } + partials_workspace = ptr; + ptr += partials_workspace_bytes; + } + + // Establish barrier workspace + barrier_workspace = nullptr; + size_t barrier_workspace_bytes = get_barrier_workspace_size(); + if (barrier_workspace_bytes > 0) + { + if (!workspace) { + return Status::kErrorWorkspaceNull; + } + barrier_workspace = ptr; + ptr += barrier_workspace_bytes; + } + + // Zero-initialize barrier workspace + if (barrier_workspace) + { + size_t barrier_workspace_bytes = get_barrier_workspace_size(); + + CUTLASS_TRACE_HOST(" Initialize " << barrier_workspace_bytes << " barrier bytes"); + + cudaError_t result = cudaMemsetAsync( + barrier_workspace, + 0, + barrier_workspace_bytes, + stream); + + if (result != cudaSuccess) { + CUTLASS_TRACE_HOST(" cudaMemsetAsync() returned error " << cudaGetErrorString(result)); + return Status::kErrorInternal; + } + } + + return Status::kSuccess; + } + + + /// Returns the GEMM volume in thread block tiles + cutlass::gemm::GemmCoord get_tiled_shape() const + { + return block_mapping.tiled_shape(); + } + + + /// Returns the total number of thread blocks to launch + int get_grid_blocks() const + { + dim3 grid_dims = get_grid_dims(); + return grid_dims.x * grid_dims.y * grid_dims.z; + } + + + /// Returns the grid extents in thread blocks to launch + dim3 get_grid_dims() const + { + return block_mapping.get_grid_dims(); + } + + /// Lightweight update given a subset of arguments. Problem geometry is assumed + /// to remain the same. 
+ CUTLASS_HOST_DEVICE + void update(Arguments const &args) + { + ptr_A = const_cast(args.ptr_A); + ptr_B = const_cast(args.ptr_B); + ptr_C = const_cast(args.ptr_C); + ptr_D = args.ptr_D; + + ptr_Vector = args.ptr_Vector; + ldr = args.ldr; + ptr_Tensor = args.ptr_Tensor; + + batch_stride_A = args.batch_stride_A; + batch_stride_B = args.batch_stride_B; + batch_stride_C = args.batch_stride_C; + batch_stride_D = args.batch_stride_D; + batch_stride_Vector = args.batch_stride_Vector; + batch_stride_Tensor = args.batch_stride_Tensor; + + output_op = args.epilogue; + + CUTLASS_TRACE_HOST("GemmStreamkWithFusedEpilogue::Params::update()"); + CUTLASS_TRACE_HOST(" ptr_Vector: " << (void *)this->ptr_Vector); + CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor); + CUTLASS_TRACE_HOST(" ldr: " << this->ldr); + } + }; + + /// Tile work descriptor + struct TileWorkDesc + { + /// The linear tile index + int tile_idx; + + /// The location of this tile (in threadblock-tile coordinates) in the output matrix + cutlass::gemm::GemmCoord tiled_coord; + + // The first global-scoped MAC-iteration this threadblock will perform for this tile + int iter_begin; + + // The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile + int k_begin; + + // The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile + int k_end; + + /// The number of remaining MAC-iterations this threadblock will perform for this tile + int k_iters_remaining; + + // Whether this block will perform the first iteration of this tile + CUTLASS_DEVICE + bool tile_started() + { + return (k_begin == 0); + } + + // Whether this block will perform the last iteration of this tile + CUTLASS_DEVICE + bool tile_finished(Params const ¶ms) + { + return (k_end == params.block_mapping.problem_size.k()); + } + }; + + + /// Shared memory storage structure + union SharedStorage { + typename Mma::SharedStorage main_loop; + typename Epilogue::SharedStorage epilogue; + }; + + +protected: + + // + // Data members + // + + /// GEMM problem parameters + Params const ¶ms; + + /// Shared storage reference + SharedStorage &shared_storage; + + /// ID within the threadblock + int thread_idx; + + /// ID of warp + int warp_idx; + + /// ID of each thread within a warp + int lane_idx; + + /// Threadblock scoped epilogue + Epilogue epilogue; + + +public: + + // + // Host dispatch API + // + + /// Determines whether kernel satisfies alignment + static Status can_implement( + cutlass::gemm::GemmCoord const & problem_size) { + + CUTLASS_TRACE_HOST("GemmStreamkWithFusedEpilogue::can_implement()"); + + static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + + bool isAMisaligned = false; + bool isBMisaligned = false; + bool isCMisaligned = false; + + if (platform::is_same::value) { + isAMisaligned = problem_size.k() % kAlignmentA; + } else if (platform::is_same::value) { + isAMisaligned = problem_size.m() % kAlignmentA; + } else if (platform::is_same>::value + || platform::is_same>::value) { + isAMisaligned = problem_size.k() % kAlignmentA; + } + + if (platform::is_same::value) { + isBMisaligned = problem_size.n() % kAlignmentB; + } else if (platform::is_same::value) { + isBMisaligned = problem_size.k() % kAlignmentB; + } else if (platform::is_same>::value + || platform::is_same>::value) { + isBMisaligned = problem_size.k() 
% kAlignmentB; + } + + if (platform::is_same::value) { + isCMisaligned = problem_size.n() % kAlignmentC; + } else if (platform::is_same::value) { + isCMisaligned = problem_size.m() % kAlignmentC; + } else if (platform::is_same>::value + || platform::is_same>::value) { + isCMisaligned = problem_size.n() % kAlignmentC; + } + + if (isAMisaligned) { + CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for A operand"); + return Status::kErrorMisalignedOperand; + } + + if (isBMisaligned) { + CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for B operand"); + return Status::kErrorMisalignedOperand; + } + + if (isCMisaligned) { + CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for C operand"); + return Status::kErrorMisalignedOperand; + } + + CUTLASS_TRACE_HOST(" returning kSuccess"); + + return Status::kSuccess; + } + + static Status can_implement(Arguments const &args) { + return can_implement(args.problem_size); + } + +protected: + + // + // Device-only utility methods + // + + /// Iterator for fetching tile fragments from A + CUTLASS_DEVICE + typename Mma::IteratorA init_iterator_A( + TileWorkDesc &tile_work, + GemmUniversalMode mode) + { + // The input A matrix + ElementA *ptr_A = static_cast(params.ptr_A); + + // Update input pointers based on batched/array mode + if (mode == GemmUniversalMode::kBatched) { + ptr_A += tile_work.tiled_coord.k() * params.batch_stride_A; + } + if (mode == GemmUniversalMode::kArray) { + ptr_A = static_cast(params.ptr_A)[tile_work.tiled_coord.k()]; + } + + int m_begin = tile_work.tiled_coord.m() * Mma::Shape::kM; + int m_end = params.block_mapping.problem_size.m(); + return Mma::IteratorA( + params.params_A, + ptr_A, + { m_end, tile_work.k_end }, + threadIdx.x, + { m_begin, tile_work.k_begin }); + + } + + + /// Iterator for fetching tile fragments from B + CUTLASS_DEVICE + typename Mma::IteratorB init_iterator_B( + TileWorkDesc &tile_work, + GemmUniversalMode mode) + { + // The input B matrix + ElementB *ptr_B = static_cast(params.ptr_B); + + // Update input pointers based on batched/array mode + if (mode == GemmUniversalMode::kBatched) { + ptr_B += tile_work.tiled_coord.k() * params.batch_stride_B; + } + if (mode == GemmUniversalMode::kArray) { + ptr_B = static_cast(params.ptr_B)[tile_work.tiled_coord.k()]; + } + + int n_begin = tile_work.tiled_coord.n() * Mma::Shape::kN; + int n_end = params.block_mapping.problem_size.n(); + return Mma::IteratorB( + params.params_B, + ptr_B, + { tile_work.k_end, n_end }, + threadIdx.x, + { tile_work.k_begin, n_begin }); + } + + + CUTLASS_DEVICE + void init_dp_tile_work( + TileWorkDesc &tile_work, + int tile_idx) + { + // The linear tile index + tile_work.tile_idx = tile_idx; + + // The first global-scoped MAC-iteration this threadblock will perform for this tile + tile_work.iter_begin = tile_idx * params.block_mapping.iters_per_tile(); + + // The number of MAC-iterations this threadblock will perform for this tile + tile_work.k_iters_remaining = params.block_mapping.iters_per_tile(); + + // The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile + tile_work.k_begin = 0; + + // The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile + tile_work.k_end = params.block_mapping.problem_size.k(); + + // The location of this tile (in threadblock-tile coordinates) in the output matrix + tile_work.tiled_coord = params.block_mapping.get_tile_offset(tile_work.tile_idx); + } + + + CUTLASS_DEVICE + void init_sk_tile_work( + 
TileWorkDesc &tile_work, + int tile_idx, + int block_iter_begin, + int block_iter_end) + { + // The linear tile index + tile_work.tile_idx = tile_idx; + + // The first global-scoped MAC-iteration for this tile + int tile_iter_begin = tile_idx * params.block_mapping.iters_per_tile(); + + // The first global-scoped MAC-iteration this threadblock will perform for this tile + tile_work.iter_begin = max(block_iter_begin, tile_iter_begin); + + // The first tile-scoped MAC-iteration this threadblock will perform for this tile + int k_iter_begin = tile_work.iter_begin - tile_iter_begin; + + // The last (one past) tile-scoped MAC-iteration this threadblock will perform for this tile + int k_iter_end = block_iter_end - tile_iter_begin; + + // The number of MAC-iterations this threadblock will perform for this tile + tile_work.k_iters_remaining = k_iter_end - k_iter_begin; + + // The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile + tile_work.k_begin = k_iter_begin * Mma::Shape::kK; + + // The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile + tile_work.k_end = min( + params.block_mapping.problem_size.k(), // extent of k domain + (k_iter_end * Mma::Shape::kK)); // extent of the threadblock's global iteration assignment + + // The location of this tile (in threadblock-tile coordinates) in the output matrix + tile_work.tiled_coord = params.block_mapping.get_tile_offset(tile_work.tile_idx); + } + + + /// Share accumulators with peers + CUTLASS_DEVICE + void share_accumulators( + AccumulatorTile const &accumulator_tile, + int block_idx, + int first_block_idx) + { + AccumulatorTile *accum_tile_workspace = reinterpret_cast(params.partials_workspace); + + int accum_tile_offset = first_block_idx * kThreadCount; + + if (block_idx == first_block_idx) + { + // First peer initializes the workspace partials + BlockStripedReduceT::store(accum_tile_workspace + accum_tile_offset, accumulator_tile, thread_idx); + } + else + { + // Subsequent peers atomically accumulate into the workspace partials + if (ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kAtomic) + { + // Non-deterministic reduction order: wait for the first peer to have initialized the partials before we add to them + Barrier::wait_lt(params.barrier_workspace, thread_idx, first_block_idx, 1); + } + else + { + // Turnstile reduction order: wait until the previous peer has written + int wait_count = block_idx - first_block_idx; + Barrier::wait_eq(params.barrier_workspace, thread_idx, first_block_idx, wait_count); + } + + // Perform reduction in workspace + BlockStripedReduceT::reduce(accum_tile_workspace + accum_tile_offset, accumulator_tile, thread_idx); + } + + // Signal our arrival + Barrier::arrive_inc(params.barrier_workspace, thread_idx, first_block_idx); + } + + + /// Acquire accumulators from peers + CUTLASS_DEVICE + void acquire_accumulators( + AccumulatorTile &accumulator_tile, + int block_idx, + int first_block_idx) + { + AccumulatorTile *accum_tile_workspace = reinterpret_cast(params.partials_workspace); + + // Wait for arrival + int num_carry_in = block_idx - first_block_idx; + Barrier::wait_eq_reset(params.barrier_workspace, thread_idx, first_block_idx, num_carry_in); + + // Load and add peer-partials accumulator tile to local accumulator tile + int accum_tile_offset = first_block_idx * kThreadCount; + BlockStripedReduceT::load_add(accumulator_tile, accum_tile_workspace + accum_tile_offset, thread_idx); + } + + + /// Perform 
epilogue computations and output + CUTLASS_DEVICE + void do_epilogue( + TileWorkDesc &tile_work, + AccumulatorTile &accumulator_tile) + { + ElementC *ptr_C = static_cast(params.ptr_C); + ElementC *ptr_D = static_cast(params.ptr_D); + typename Epilogue::ElementTensor *ptr_Tensor = static_cast(params.ptr_Tensor); + + // Define the reduction output pointer and move to the appropriate place + typename Epilogue::ElementVector *ptr_Vector = + static_cast(params.ptr_Vector); + + // Update pointers for batched/array mode(s) + if (params.mode == GemmUniversalMode::kBatched) { + ptr_C += tile_work.tiled_coord.k() * params.batch_stride_C; + ptr_D += tile_work.tiled_coord.k() * params.batch_stride_D; + if (ptr_Tensor) { + ptr_Tensor += tile_work.tiled_coord.k() * params.batch_stride_Tensor; + } + if (ptr_Vector) { + ptr_Vector += tile_work.tiled_coord.k() * params.batch_stride_Vector; + } + } + if (params.mode == GemmUniversalMode::kArray) { + ptr_C = static_cast(params.ptr_C)[tile_work.tiled_coord.k()]; + ptr_D = static_cast(params.ptr_D)[tile_work.tiled_coord.k()]; + if (ptr_Tensor) { + ptr_Tensor = static_cast(params.ptr_Tensor)[tile_work.tiled_coord.k()]; + } + if (ptr_Vector) { + ptr_Vector = static_cast(params.ptr_Vector)[tile_work.tiled_coord.k()]; + } + } + + // Location of this tile in item-coords + MatrixCoord threadblock_item_begin( + tile_work.tiled_coord.m() * Mma::Shape::kM, + tile_work.tiled_coord.n() * Mma::Shape::kN + ); + + // Tile iterator loading from source tensor. + typename Epilogue::OutputTileIterator iterator_C( + params.params_C, + ptr_C, + params.block_mapping.problem_size.mn(), + thread_idx, + threadblock_item_begin); + + // Tile iterator writing to destination tensor. + typename Epilogue::OutputTileIterator iterator_D( + params.params_D, + ptr_D, + params.block_mapping.problem_size.mn(), + thread_idx, + threadblock_item_begin); + + // Additional tensor to load from + typename Epilogue::TensorTileIterator tensor_iterator( + params.params_Tensor, + ptr_Tensor, + params.block_mapping.problem_size.mn(), + thread_idx, + threadblock_item_begin); + + // Move to appropriate location for this output tile + if (ptr_Vector) { + ptr_Vector += threadblock_item_begin.column() + tile_work.tiled_coord.m() * params.ldr; + } + + // Execute the epilogue operator to update the destination tensor. 
+ epilogue( + EpilogueOutputOp(params.output_op), + ptr_Vector, + iterator_D, + accumulator_tile, + iterator_C, + tensor_iterator, + params.block_mapping.problem_size.mn(), + threadblock_item_begin); + } + + + CUTLASS_DEVICE + void separate_reduction(int reduce_idx) + { + int peer_idx_begin, peer_idx_last, reduce_tile_idx, reduce_fragment_idx; + + // Reduce by sk-tile (every tile contributed to by one or more blocks) + reduce_tile_idx = reduce_idx / Epilogue::kAccumulatorFragments; + reduce_fragment_idx = reduce_idx % Epilogue::kAccumulatorFragments; + + int iter_tile_first = reduce_tile_idx * params.block_mapping.iters_per_tile(); + int iter_tile_last = iter_tile_first + params.block_mapping.iters_per_tile() - 1; + + peer_idx_begin = params.block_mapping.get_sk_block_idx(iter_tile_first); + peer_idx_last = params.block_mapping.get_sk_block_idx(iter_tile_last); + + // Wait for peers to complete + int peer_idx_end = peer_idx_last + 1; + int num_peers = peer_idx_end - peer_idx_begin; + Barrier::wait_eq_reset( + params.barrier_workspace, + thread_idx, + (reduce_tile_idx * Epilogue::kAccumulatorFragments) + reduce_fragment_idx, + num_peers); + + /// The location of this tile (in threadblock-tile coordinates) in the output matrix + GemmCoord tiled_coord = params.block_mapping.get_tile_offset(reduce_tile_idx); + + // Location of this tile in item-coords + MatrixCoord threadblock_item_begin( + tiled_coord.m() * Mma::Shape::kM, + tiled_coord.n() * Mma::Shape::kN + ); + + ElementC *ptr_C = static_cast(params.ptr_C); + ElementC *ptr_D = static_cast(params.ptr_D); + typename Epilogue::ElementTensor *ptr_Tensor = static_cast(params.ptr_Tensor); + + // Define the reduction output pointer and move to the appropriate place + typename Epilogue::ElementVector *ptr_Vector = + static_cast(params.ptr_Vector); + + // Tile iterator loading from source tensor. + typename Epilogue::OutputTileIterator iterator_C( + params.params_C, + ptr_C, + params.block_mapping.problem_size.mn(), + thread_idx, + threadblock_item_begin); + + // Tile iterator writing to destination tensor. + typename Epilogue::OutputTileIterator iterator_D( + params.params_D, + ptr_D, + params.block_mapping.problem_size.mn(), + thread_idx, + threadblock_item_begin); + + // Additional tensor to load from + typename Epilogue::TensorTileIterator tensor_iterator( + params.params_Tensor, + ptr_Tensor, + params.block_mapping.problem_size.mn(), + thread_idx, + threadblock_item_begin); + + // Move to appropriate location for this output tile + if (ptr_Vector) { + ptr_Vector += threadblock_item_begin.column() + tiled_coord.m() * params.ldr; + } + + // Execute the epilogue operator to update the destination tensor. 
+ epilogue.reduce( + peer_idx_begin, + peer_idx_end, + reduce_fragment_idx, + params.partials_workspace, + EpilogueOutputOp(params.output_op), + ptr_Vector, + iterator_D, + iterator_C, + tensor_iterator, + params.block_mapping.problem_size.mn(), + threadblock_item_begin); + } + + + CUTLASS_DEVICE + void process_tile( + TileWorkDesc tile_work, + int block_idx, + int dp_start_block_idx, + int block_iter_begin) + { + // Initialize input iterators + typename Mma::IteratorA iterator_A = init_iterator_A(tile_work, params.mode); + typename Mma::IteratorB iterator_B = init_iterator_B(tile_work, params.mode); + + // Initialize accumulators + AccumulatorTile accumulator_tile; + accumulator_tile.clear(); + + // Initialize MMA abstraction + Mma mma( + shared_storage.main_loop, + thread_idx, + warp_idx, + lane_idx); + + // Perform this tile's range of multiply-accumulate (MAC) iterations + mma(tile_work.k_iters_remaining, accumulator_tile, iterator_A, iterator_B, accumulator_tile); + + if ((ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kAtomic) || + (params.block_mapping.reduction_blocks == 0) || + (block_idx >= dp_start_block_idx)) + { + // + // Cooperative SK peer reduction or DP block + // + + int first_block_idx = params.block_mapping.get_first_block_idx(tile_work.tile_idx, block_idx); + + if (!tile_work.tile_finished(params)) { + // Non "finishing" SK blocks must share their partial accumulator sums through global scratch workspace + share_accumulators(accumulator_tile, block_idx, first_block_idx); + } + else + { + // DP blocks and "finishing" SK blocks must perform epilogue operations and write the output tile + if (!tile_work.tile_started()) + { + // A "finishing" SK block must first aggregate its accumulator partial sums with those shared by peer threadblocks + acquire_accumulators(accumulator_tile, block_idx, first_block_idx); + } + + do_epilogue(tile_work, accumulator_tile); + } + } + else + { + // + // Separate peer reduction + // + + // Share accumulator partial sums with peer threadblock(s) through scratch workspace + epilogue.share(block_idx, params.partials_workspace, accumulator_tile, tile_work.tile_started()); + + // Signal arrival + Barrier::arrive_range_inc( + params.barrier_workspace, + thread_idx, + tile_work.tile_idx * Epilogue::kAccumulatorFragments, + Epilogue::kAccumulatorFragments); + } + } + + + /// Executes one GEMM + CUTLASS_DEVICE + void gemm() + { + // Initialize block's iteration range + int tile_idx = 0; + int block_iter_begin = 0; + int block_iters_remaining = 0; + + int block_idx = params.block_mapping.get_block_idx(); + + int sk_padding_start_block_idx = params.block_mapping.sk_regions() * params.block_mapping.sk_blocks_per_region(); + int dp_start_block_idx = params.block_mapping.sk_waves * params.block_mapping.avail_sms; + int reduce_start_block_idx = dp_start_block_idx + params.block_mapping.dp_blocks; + int grid_padding_start_block_idx = reduce_start_block_idx + params.block_mapping.reduction_blocks; + + // Initialize tile work descriptor + TileWorkDesc tile_work; + + bool dp_block = (block_idx >= dp_start_block_idx) && (block_idx < reduce_start_block_idx); + bool sk_block = (block_idx < sk_padding_start_block_idx); + bool reduce_block = (block_idx >= reduce_start_block_idx) && + (block_idx < grid_padding_start_block_idx) && + (ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kMixed); + + if (dp_block) + { + // This is a DP block + int dp_block_idx = block_idx - dp_start_block_idx; + int first_dp_tile = 
(params.block_mapping.cohort_raster) ? 0 : params.block_mapping.sk_tiles; + + // Blocks in first DP wave get configured number of tiles + tile_idx = first_dp_tile + dp_block_idx; + int tile_allottment = params.block_mapping.dp_first_wave_tiles; + + // Blocks in subsequent DP waves get 1 tile + if (dp_block_idx >= params.block_mapping.avail_sms) { + tile_allottment = 1; + tile_idx += (params.block_mapping.dp_first_wave_tiles - 1) * params.block_mapping.avail_sms; + } + + block_iters_remaining = params.block_mapping.iters_per_tile() * tile_allottment; + + init_dp_tile_work(tile_work, tile_idx); + + // DP blocks exit if out of bounds or overlap an SK tile (only possible during cohort rasterization, where dp_first_wave_tiles must be 1) + if ((tile_idx < params.block_mapping.sk_tiles) || + (tile_work.tiled_coord.m() >= params.block_mapping.tiled_shape().m()) || + (tile_work.tiled_coord.n() >= params.block_mapping.tiled_shape().n())) + { + return; + } + } + else if (sk_block) + { + // This is a SK block + int block_iter_end; + params.block_mapping.get_iter_extents(block_idx, block_iter_begin, block_iter_end); + block_iters_remaining = block_iter_end - block_iter_begin; + + tile_idx = params.block_mapping.get_sk_tile_idx(block_iter_end - 1); + init_sk_tile_work(tile_work, tile_idx, block_iter_begin, block_iter_begin + block_iters_remaining); + } + else + { + if (reduce_block) + { + // This is a reduction threadblock + int reduce_block_idx = block_idx - reduce_start_block_idx; + separate_reduction(reduce_block_idx); + } + + return; + } + + // Iteration-processing loop body + CUTLASS_PRAGMA_NO_UNROLL + while (true) + { + // Perform this block's share of work for this tile + process_tile( + tile_work, + block_idx, + dp_start_block_idx, + block_iter_begin); + + block_iters_remaining -= tile_work.k_iters_remaining; + + if (block_iters_remaining == 0) + { + break; + } + + // Continue to next tile + __syncthreads(); + + if (block_idx >= dp_start_block_idx) + { + // DP block consume their tiles at stride + tile_idx += params.block_mapping.avail_sms; + init_dp_tile_work(tile_work, tile_idx); + } + else + { + // SK blocks consume their tiles in backwards order + tile_idx--; + init_sk_tile_work(tile_work, tile_idx, block_iter_begin, block_iter_begin + block_iters_remaining); + } + } + + } + + +public: + + // + // Device-only API + // + + // Factory invocation + CUTLASS_DEVICE + static void invoke( + Params const ¶ms, + SharedStorage &shared_storage) + { + GemmStreamkWithFusedEpilogue op(params, shared_storage); + op(); + } + + + // Constructor + CUTLASS_DEVICE + GemmStreamkWithFusedEpilogue( + Params const ¶ms, + SharedStorage &shared_storage) + : + params(params), + shared_storage(shared_storage), + thread_idx(threadIdx.x), + warp_idx(__shfl_sync(0xffffffff, threadIdx.x / 32, 0)), // broadcast the warp_id computed by lane 0 to ensure dependent code + lane_idx(threadIdx.x % 32), + epilogue( + shared_storage.epilogue, + thread_idx, + warp_idx, + lane_idx) + {} + + /// Executes one GEMM + CUTLASS_DEVICE + void operator()() { + // Generic SK code path + gemm(); + + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_transpose_operands.h 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_transpose_operands.h new file mode 100644 index 0000000000000000000000000000000000000000..dec99356e6007b67a30781ba1713f72fe3eb618c --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_transpose_operands.h @@ -0,0 +1,124 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! + \file + \brief The universal GEMM accommodates serial reductions, parallel reductions, batched strided, and + batched array variants. 
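+
+  (Editor's note, not part of the upstream header: the detail::MapArguments helper defined
+   below remaps operand types when a GEMM is solved as its transpose. A minimal, hedged
+   usage sketch with assumed element types and alignments:
+
+     using Mapped = detail::MapArguments<
+       cutlass::half_t, cutlass::layout::RowMajor,    cutlass::ComplexTransform::kNone, 8,
+       cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::ComplexTransform::kNone, 8,
+       cutlass::layout::RowMajor,
+       true>;
+
+   With Transpose == true, the A and B descriptions (elements, transforms, alignments) are
+   swapped and each layout is replaced by layout::LayoutTranspose<...>::type, so
+   Mapped::LayoutA is RowMajor here; with Transpose == false the arguments pass through
+   unchanged.)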
+*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/gemm.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace detail { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename ElementA_, + typename LayoutA_, + ComplexTransform TransformA, + int AlignmentA, + typename ElementB_, + typename LayoutB_, + ComplexTransform TransformB, + int AlignmentB, + typename LayoutC_, + bool Transpose +> +struct MapArguments { + using ElementA = ElementA_; + using LayoutA = LayoutA_; + static ComplexTransform const kTransformA = TransformA; + static int const kAlignmentA = AlignmentA; + using ElementB = ElementB_; + using LayoutB = LayoutB_; + static ComplexTransform const kTransformB = TransformB; + static int const kAlignmentB = AlignmentB; + using LayoutC = LayoutC_; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename ElementA_, + typename LayoutA_, + ComplexTransform TransformA, + int AlignmentA, + typename ElementB_, + typename LayoutB_, + ComplexTransform TransformB, + int AlignmentB, + typename LayoutC_ +> +struct MapArguments< + ElementA_, + LayoutA_, + TransformA, + AlignmentA, + ElementB_, + LayoutB_, + TransformB, + AlignmentB, + LayoutC_, + true +> { + using ElementA = ElementB_; + using LayoutA = typename layout::LayoutTranspose::type; + static ComplexTransform const kTransformA = TransformB; + static int const kAlignmentA = AlignmentB; + using ElementB = ElementA_; + using LayoutB = typename layout::LayoutTranspose::type; + static ComplexTransform const kTransformB = TransformA; + static int const kAlignmentB = AlignmentA; + using LayoutC = typename layout::LayoutTranspose::type; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} +} +} + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_universal.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_universal.h new file mode 100644 index 0000000000000000000000000000000000000000..f095bc533f51ced764a43c5fbbcf6bc169f8401d --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_universal.h @@ -0,0 +1,702 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief +*/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/arch/arch.h" +#include "cutlass/fast_math.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/complex.h" +#include "cutlass/semaphore.h" +#include "cutlass/gemm/kernel/gemm_universal.hpp" + +#include "cutlass/layout/matrix.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/kernel/params_universal_base.h" +#include "cutlass/trace.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate + typename Epilogue_, ///! Epilogue + typename ThreadblockSwizzle_ ///! 
Threadblock swizzling function +> +class GemmUniversal< + Mma_, + Epilogue_, + ThreadblockSwizzle_, + void, + // 3.x kernels use the first template argument to define the ProblemShape tuple + // We use this invariant to SFINAE dispatch against either the 2.x API or the 3.x API + cute::enable_if_t::value> +> { +public: + + using Mma = Mma_; + using Epilogue = Epilogue_; + using EpilogueOutputOp = typename Epilogue::OutputOp; + using ThreadblockSwizzle = ThreadblockSwizzle_; + + using ElementA = typename Mma::IteratorA::Element; + using LayoutA = typename Mma::IteratorA::Layout; + using ElementB = typename Mma::IteratorB::Element; + using LayoutB = typename Mma::IteratorB::Layout; + using ElementC = typename Epilogue::OutputTileIterator::Element; + using LayoutC = typename Epilogue::OutputTileIterator::Layout; + + static ComplexTransform const kTransformA = Mma::kTransformA; + static ComplexTransform const kTransformB = Mma::kTransformB; + using Operator = typename Mma::Operator; + + using OperatorClass = typename Mma::Operator::OperatorClass; + using ThreadblockShape = typename Mma::Shape; + using WarpShape = typename Mma::Operator::Shape; + using InstructionShape = typename Mma::Policy::Operator::InstructionShape; + using ArchTag = typename Mma::ArchTag; + + static int const kStages = Mma::kStages; + static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + + /// Warp count (concept: GemmShape) + using WarpCount = typename Mma::WarpCount; + static int const kThreadCount = 32 * WarpCount::kCount; + + /// Split-K preserves splits that are 128b aligned + static int const kSplitKAlignment = const_max(128 / sizeof_bits::value, 128 / sizeof_bits::value); + + // + // Structures + // + + /// Argument structure + struct Arguments : UniversalArgumentsBase + { + // + // Data members + // + + typename EpilogueOutputOp::Params epilogue; + + void const * ptr_A; + void const * ptr_B; + void const * ptr_C; + void * ptr_D; + + int64_t batch_stride_A; + int64_t batch_stride_B; + int64_t batch_stride_C; + + typename LayoutA::Stride stride_a; + typename LayoutB::Stride stride_b; + typename LayoutC::Stride stride_c; + typename LayoutC::Stride stride_d; + + typename LayoutA::Stride::LongIndex lda; + typename LayoutB::Stride::LongIndex ldb; + typename LayoutC::Stride::LongIndex ldc; + typename LayoutC::Stride::LongIndex ldd; + + int const * ptr_gather_A_indices; + int const * ptr_gather_B_indices; + int const * ptr_scatter_D_indices; + + // + // Methods + // + + Arguments(): + ptr_A(nullptr), ptr_B(nullptr), ptr_C(nullptr), ptr_D(nullptr), + ptr_gather_A_indices(nullptr), + ptr_gather_B_indices(nullptr), + ptr_scatter_D_indices(nullptr) + {} + + /// constructs an arguments structure + Arguments( + GemmUniversalMode mode, + GemmCoord problem_size, + int batch_count, + typename EpilogueOutputOp::Params epilogue, + void const * ptr_A, + void const * ptr_B, + void const * ptr_C, + void * ptr_D, + int64_t batch_stride_A, + int64_t batch_stride_B, + int64_t batch_stride_C, + int64_t batch_stride_D, + typename LayoutA::Stride stride_a, + typename LayoutB::Stride stride_b, + typename LayoutC::Stride stride_c, + typename LayoutC::Stride stride_d, + int const *ptr_gather_A_indices = nullptr, + int const *ptr_gather_B_indices = nullptr, + int const *ptr_scatter_D_indices = nullptr) + : + UniversalArgumentsBase(mode, problem_size, batch_count, 
batch_stride_D), + epilogue(epilogue), + ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D), + batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C), + stride_a(stride_a), stride_b(stride_b), stride_c(stride_c), stride_d(stride_d), + ptr_gather_A_indices(ptr_gather_A_indices), ptr_gather_B_indices(ptr_gather_B_indices), + ptr_scatter_D_indices(ptr_scatter_D_indices) + { + lda = 0; + ldb = 0; + ldc = 0; + ldd = 0; + CUTLASS_TRACE_HOST("GemmUniversal::Arguments::Arguments() - problem_size: " << problem_size); + } + + /// constructs an arguments structure + Arguments( + GemmUniversalMode mode, + GemmCoord problem_size, + int batch_count, + typename EpilogueOutputOp::Params epilogue, + void const * ptr_A, + void const * ptr_B, + void const * ptr_C, + void * ptr_D, + int64_t batch_stride_A, + int64_t batch_stride_B, + int64_t batch_stride_C, + int64_t batch_stride_D, + typename LayoutA::Stride::LongIndex lda, + typename LayoutB::Stride::LongIndex ldb, + typename LayoutC::Stride::LongIndex ldc, + typename LayoutC::Stride::LongIndex ldd, + int const *ptr_gather_A_indices = nullptr, + int const *ptr_gather_B_indices = nullptr, + int const *ptr_scatter_D_indices = nullptr + ): + UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D), + epilogue(epilogue), + ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D), + batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C), + lda(lda), ldb(ldb), ldc(ldc), ldd(ldd), + ptr_gather_A_indices(ptr_gather_A_indices), ptr_gather_B_indices(ptr_gather_B_indices), + ptr_scatter_D_indices(ptr_scatter_D_indices) + { + stride_a = make_Coord(lda); + stride_b = make_Coord(ldb); + stride_c = make_Coord(ldc); + stride_d = make_Coord(ldd); + CUTLASS_TRACE_HOST("GemmUniversal::Arguments::Arguments() - problem_size: " << problem_size); + } + + /// Returns arguments for the transposed problem + Arguments transposed_problem() const + { + Arguments args(*this); + + std::swap(args.problem_size.m(), args.problem_size.n()); + std::swap(args.ptr_A, args.ptr_B); + std::swap(args.lda, args.ldb); + std::swap(args.stride_a, args.stride_b); + std::swap(args.batch_stride_A, args.batch_stride_B); + std::swap(args.ptr_gather_A_indices, args.ptr_gather_B_indices); + + return args; + } + }; + + + // + // Structure for precomputing values in host memory and passing to kernels + // + + /// Parameters structure + struct Params : UniversalParamsBase< + ThreadblockSwizzle, + ThreadblockShape, + ElementA, + ElementB, + ElementC, + LayoutA, + LayoutB> + { + using ParamsBase = UniversalParamsBase< + ThreadblockSwizzle, + ThreadblockShape, + ElementA, + ElementB, + ElementC, + LayoutA, + LayoutB>; + + // + // Data members + // + + typename Mma::IteratorA::Params params_A; + typename Mma::IteratorB::Params params_B; + typename Epilogue::OutputTileIterator::Params params_C; + typename Epilogue::OutputTileIterator::Params params_D; + + typename EpilogueOutputOp::Params output_op; + + void * ptr_A; + void * ptr_B; + void * ptr_C; + void * ptr_D; + + int64_t batch_stride_A; + int64_t batch_stride_B; + int64_t batch_stride_C; + + int * ptr_gather_A_indices; + int * ptr_gather_B_indices; + int * ptr_scatter_D_indices; + + // + // Host dispatch API + // + + /// Default constructor + Params() = default; + + /// Constructor + Params( + Arguments const &args, /// GEMM application arguments + int device_sms, /// Number of SMs on the device + int sm_occupancy) /// Kernel SM occupancy (in thread blocks) + : + 
ParamsBase(args, device_sms, sm_occupancy), + params_A(args.lda ? make_Coord_with_padding(args.lda) : args.stride_a), + params_B(args.ldb ? make_Coord_with_padding(args.ldb) : args.stride_b), + params_C(args.ldc ? make_Coord_with_padding(args.ldc) : args.stride_c), + params_D(args.ldd ? make_Coord_with_padding(args.ldd) : args.stride_d), + output_op(args.epilogue), + ptr_A(const_cast(args.ptr_A)), + ptr_B(const_cast(args.ptr_B)), + ptr_C(const_cast(args.ptr_C)), + ptr_D(args.ptr_D), + batch_stride_A(args.batch_stride_A), + batch_stride_B(args.batch_stride_B), + batch_stride_C(args.batch_stride_C), + ptr_gather_A_indices(const_cast(args.ptr_gather_A_indices)), + ptr_gather_B_indices(const_cast(args.ptr_gather_B_indices)), + ptr_scatter_D_indices(const_cast(args.ptr_scatter_D_indices)) + {} + + /// Lightweight update given a subset of arguments. + void update(Arguments const &args) + { + CUTLASS_TRACE_HOST("GemmUniversal::Params::update()"); + + // Update input/output pointers + ptr_A = const_cast(args.ptr_A); + ptr_B = const_cast(args.ptr_B); + ptr_C = const_cast(args.ptr_C); + ptr_D = args.ptr_D; + + batch_stride_A = args.batch_stride_A; + batch_stride_B = args.batch_stride_B; + batch_stride_C = args.batch_stride_C; + this->batch_stride_D = args.batch_stride_D; + + ptr_gather_A_indices = const_cast(args.ptr_gather_A_indices); + ptr_gather_B_indices = const_cast(args.ptr_gather_B_indices); + ptr_scatter_D_indices = const_cast(args.ptr_scatter_D_indices); + + output_op = args.epilogue; + } + + }; + + /// Shared memory storage structure + union SharedStorage { + typename Mma::SharedStorage main_loop; + typename Epilogue::SharedStorage epilogue; + }; + + +public: + + // + // Host dispatch API + // + + /// Determines whether kernel satisfies alignment + static Status can_implement( + cutlass::gemm::GemmCoord const & problem_size) + { + CUTLASS_TRACE_HOST("GemmUniversal::can_implement()"); + + static int const kAlignmentA = (cute::is_same>::value) + ? 32 + : (cute::is_same>::value) + ? 64 + : Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = (cute::is_same>::value) + ? 32 + : (cute::is_same>::value) + ? 64 + : Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = (cute::is_same>::value) + ? 32 + : (cute::is_same>::value) + ? 
64 + : Epilogue::OutputTileIterator::kElementsPerAccess; + + bool isAMisaligned = false; + bool isBMisaligned = false; + bool isCMisaligned = false; + + if (cute::is_same::value) { + isAMisaligned = problem_size.k() % kAlignmentA; + } else if (cute::is_same::value) { + isAMisaligned = problem_size.m() % kAlignmentA; + } else if (cute::is_same>::value + || cute::is_same>::value) { + isAMisaligned = problem_size.k() % kAlignmentA; + } + + if (cute::is_same::value) { + isBMisaligned = problem_size.n() % kAlignmentB; + } else if (cute::is_same::value) { + isBMisaligned = problem_size.k() % kAlignmentB; + } else if (cute::is_same>::value + || cute::is_same>::value) { + isBMisaligned = problem_size.k() % kAlignmentB; + } + + if (cute::is_same::value) { + isCMisaligned = problem_size.n() % kAlignmentC; + } else if (cute::is_same::value) { + isCMisaligned = problem_size.m() % kAlignmentC; + } else if (cute::is_same>::value + || cute::is_same>::value) { + isCMisaligned = problem_size.n() % kAlignmentC; + } + + if (isAMisaligned) { + CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for A operand"); + return Status::kErrorMisalignedOperand; + } + + if (isBMisaligned) { + CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for B operand"); + return Status::kErrorMisalignedOperand; + } + + if (isCMisaligned) { + CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for C operand"); + return Status::kErrorMisalignedOperand; + } + + CUTLASS_TRACE_HOST(" returning kSuccess"); + + return Status::kSuccess; + } + + static Status can_implement(Arguments const &args) { + return can_implement(args.problem_size); + } + + +public: + + // + // Device-only API + // + + // Factory invocation + CUTLASS_DEVICE + static void invoke( + Params const ¶ms, + SharedStorage &shared_storage) + { + GemmUniversal op; + op(params, shared_storage); + } + + + /// Executes one GEMM + CUTLASS_DEVICE + void operator()(Params const ¶ms, SharedStorage &shared_storage) { + ThreadblockSwizzle threadblock_swizzle; + run_with_swizzle(params, shared_storage, threadblock_swizzle); + } + + /// Executes one GEMM with an externally-provided swizzling function + CUTLASS_DEVICE + void run_with_swizzle(Params const ¶ms, SharedStorage &shared_storage, ThreadblockSwizzle& threadblock_swizzle) { + + cutlass::gemm::GemmCoord threadblock_tile_offset = + threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + // Early exit if CTA is out of range + if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() || + params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { + + return; + } + + int offset_k = 0; + int problem_size_k = params.problem_size.k(); + + ElementA *ptr_A = static_cast(params.ptr_A); + ElementB *ptr_B = static_cast(params.ptr_B); + + // + // Fetch pointers based on mode. 
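+    // (Editor's note, not upstream CUTLASS — a hedged summary of the branches below:
+    //    kGemm / kGemmSplitKParallel : this block works on a K-slice, so offset_k and
+    //                                  problem_size_k are derived from gemm_k_size;
+    //    kBatched                    : ptr_A/ptr_B advance by batch_stride_A/B per batch index;
+    //    kArray                      : ptr_A/ptr_B are read from arrays of per-batch pointers.)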
+ // + if (params.mode == GemmUniversalMode::kGemm || + params.mode == GemmUniversalMode::kGemmSplitKParallel) { + + if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) { + + problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size; + } + + offset_k = threadblock_tile_offset.k() * params.gemm_k_size; + } + else if (params.mode == GemmUniversalMode::kBatched) { + ptr_A += threadblock_tile_offset.k() * params.batch_stride_A; + ptr_B += threadblock_tile_offset.k() * params.batch_stride_B; + } + else if (params.mode == GemmUniversalMode::kArray) { + ptr_A = static_cast(params.ptr_A)[threadblock_tile_offset.k()]; + ptr_B = static_cast(params.ptr_B)[threadblock_tile_offset.k()]; + } + + __syncthreads(); + + // Compute initial location in logical coordinates + cutlass::MatrixCoord tb_offset_A{ + threadblock_tile_offset.m() * Mma::Shape::kM, + offset_k, + }; + + cutlass::MatrixCoord tb_offset_B{ + offset_k, + threadblock_tile_offset.n() * Mma::Shape::kN + }; + + // Compute position within threadblock + int thread_idx = threadIdx.x; + + // Construct iterators to A and B operands + typename Mma::IteratorA iterator_A( + params.params_A, + ptr_A, + {params.problem_size.m(), problem_size_k}, + thread_idx, + tb_offset_A, + params.ptr_gather_A_indices); + + typename Mma::IteratorB iterator_B( + params.params_B, + ptr_B, + {problem_size_k, params.problem_size.n()}, + thread_idx, + tb_offset_B, + params.ptr_gather_B_indices); + + // Broadcast the warp_id computed by lane 0 to ensure dependent code + // is compiled as warp-uniform. + int warp_idx = canonical_warp_idx_sync(); + + int lane_idx = threadIdx.x % 32; + + // + // Main loop + // + + // Construct thread-scoped matrix multiply + Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); + + typename Mma::FragmentC accumulators; + + accumulators.clear(); + + // Compute threadblock-scoped matrix multiply-add + int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK; + + // Compute threadblock-scoped matrix multiply-add + mma( + gemm_k_iterations, + accumulators, + iterator_A, + iterator_B, + accumulators); + + // + // Epilogue + // + + EpilogueOutputOp output_op(params.output_op); + + // + // Masked tile iterators constructed from members + // + + threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + //assume identity swizzle + MatrixCoord threadblock_offset( + threadblock_tile_offset.m() * Mma::Shape::kM, + threadblock_tile_offset.n() * Mma::Shape::kN + ); + + int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m(); + + ElementC *ptr_C = static_cast(params.ptr_C); + ElementC *ptr_D = static_cast(params.ptr_D); + + // + // Fetch pointers based on mode. + // + + // Construct the semaphore. + Semaphore semaphore(params.semaphore + block_idx, thread_idx); + + if (params.mode == GemmUniversalMode::kGemm) { + + // If performing a reduction via split-K, fetch the initial synchronization + if (params.grid_tiled_shape.k() > 1) { + + // Fetch the synchronization lock initially but do not block. 
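+        // (Editor's note, not upstream CUTLASS: in this serial split-K path the semaphore is
+        //  fetched early to hide latency, waited on with wait(k) just before the epilogue so
+        //  split k reads the partials split k-1 left in D, and released afterwards with either
+        //  k+1 or 0 — the final split resets it for subsequent launches.)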
+ semaphore.fetch(); + + // Indicate which position in a serial reduction the output operator is currently updating + output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k()); + } + } + else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) { + ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; + } + else if (params.mode == GemmUniversalMode::kBatched) { + ptr_C += threadblock_tile_offset.k() * params.batch_stride_C; + ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; + } + else if (params.mode == GemmUniversalMode::kArray) { + ptr_C = static_cast(params.ptr_C)[threadblock_tile_offset.k()]; + ptr_D = static_cast(params.ptr_D)[threadblock_tile_offset.k()]; + } + + // Tile iterator loading from source tensor. + typename Epilogue::OutputTileIterator iterator_C( + params.params_C, + ptr_C, + params.problem_size.mn(), + thread_idx, + threadblock_offset, + params.ptr_scatter_D_indices + ); + + // Tile iterator writing to destination tensor. + typename Epilogue::OutputTileIterator iterator_D( + params.params_D, + ptr_D, + params.problem_size.mn(), + thread_idx, + threadblock_offset, + params.ptr_scatter_D_indices + ); + + Epilogue epilogue( + shared_storage.epilogue, + thread_idx, + warp_idx, + lane_idx); + + // Wait on the semaphore - this latency may have been covered by iterator construction + if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { + + // For subsequent threadblocks, the source matrix is held in the 'D' tensor. + if (threadblock_tile_offset.k()) { + iterator_C = iterator_D; + } + + semaphore.wait(threadblock_tile_offset.k()); + } + + + // Execute the epilogue operator to update the destination tensor. + epilogue( + output_op, + iterator_D, + accumulators, + iterator_C); + + // + // Release the semaphore + // + + if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { + + int lock = 0; + if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) { + + // The final threadblock resets the semaphore for subsequent grids. + lock = 0; + } + else { + // Otherwise, the semaphore is incremented + lock = threadblock_tile_offset.k() + 1; + } + + semaphore.release(lock); + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_universal.hpp b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_universal.hpp new file mode 100644 index 0000000000000000000000000000000000000000..4e046ddd3e6ea3b263cba29c01aefa1bfa8a4017 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_universal.hpp @@ -0,0 +1,75 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +#pragma once + +#include "cutlass/gemm/kernel/tile_scheduler.hpp" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass::gemm::kernel { + +//////////////////////////////////////////////////////////////////////////////// + +/* + * Stateless universal device GEMM kernel type that treats GEMM as + * a composition of a collective mainloop and a collective epilogue. + * + * Supports both the 2.x and 3.x APIs based on whether the first type is + * a cute::tuple<> or not. + * 2.x API implementation: cutlass/gemm/kernel/gemm_universal.h + * 3.x API implementation: cutlass/gemm/kernel/gemm_*.hpp + * + * In the following declaration, the name preceding the 'Or' refers to + * 3.x API type argument order, and the name succeeding the 'Or' refers to + * 2.x API type argument order. Template arguments without two names + * belong to the 3.x API only. 
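+ *
+ * (Editor's note, not part of the upstream comment: a hedged sketch of how dispatch works.
+ *  The 2.x partial specializations, e.g. in gemm_universal.h, are constrained with
+ *  cute::enable_if_t<!cute::is_tuple<ProblemShapeOrThreadblockMma_>::value>, so passing a
+ *  cute::Shape<int,int,int,int> problem shape as the first argument selects a 3.x kernel,
+ *  while passing a threadblock-scoped Mma type selects the 2.x kernel declared here.)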
+**/ +template < + class ProblemShapeOrThreadblockMma_, // (m, n, k) or (m, n, k, l) + class CollectiveMainloopOrEpilogue_, + class CollectiveEpilogueOrThreadblockSwizzle_, + class TileScheduler_ = void, + class Enable = void +> +class GemmUniversal; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace cutlass::gemm::kernel + +//////////////////////////////////////////////////////////////////////////////// + +#include "cutlass/gemm/kernel/sm70_gemm.hpp" +#include "cutlass/gemm/kernel/sm90_gemm_tma.hpp" +#include "cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized.hpp" +#include "cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized_pingpong.hpp" +#include "cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized_cooperative.hpp" +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_universal_streamk.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_universal_streamk.h new file mode 100644 index 0000000000000000000000000000000000000000..c52a15c3c2600096e3467281374a187f40145b95 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_universal_streamk.h @@ -0,0 +1,1174 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! 
\file + \brief +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/fast_math.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/complex.h" +#include "cutlass/barrier.h" +#include "cutlass/block_striped.h" + +#include "cutlass/trace.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate + typename Epilogue_, ///! Epilogue + typename ThreadblockSwizzle_ ///! Threadblock mapping function +> +struct GemmUniversalStreamk { +public: + + + // + // Types and constants + // + + using Mma = Mma_; + using Epilogue = Epilogue_; + using EpilogueOutputOp = typename Epilogue::OutputOp; + using ThreadblockSwizzle = ThreadblockSwizzle_; + + using ElementA = typename Mma::IteratorA::Element; + using LayoutA = typename Mma::IteratorA::Layout; + using ElementB = typename Mma::IteratorB::Element; + using LayoutB = typename Mma::IteratorB::Layout; + using ElementC = typename Epilogue::OutputTileIterator::Element; + using LayoutC = typename Epilogue::OutputTileIterator::Layout; + + /// The per-thread tile of raw accumulators + using AccumulatorTile = typename Mma::FragmentC; + + static ComplexTransform const kTransformA = Mma::kTransformA; + static ComplexTransform const kTransformB = Mma::kTransformB; + using Operator = typename Mma::Operator; + + using OperatorClass = typename Mma::Operator::OperatorClass; + using ThreadblockShape = typename Mma::Shape; + using WarpShape = typename Mma::Operator::Shape; + using InstructionShape = typename Mma::Policy::Operator::InstructionShape; + using ArchTag = typename Mma::ArchTag; + + static int const kStages = Mma::kStages; + static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + + /// Warp count (concept: GemmShape) + using WarpCount = typename Mma::WarpCount; + static int const kThreadCount = 32 * WarpCount::kCount; + + /// Workspace bytes per thread block + static size_t const kWorkspaceBytesPerBlock = + __NV_STD_MAX( + kThreadCount * sizeof(AccumulatorTile), + Epilogue::kWorkspaceBytesPerBlock); + + /// Block-striped reduction utility + using BlockStripedReduceT = BlockStripedReduce; + + + + // + // Structures + // + + /// Argument structure + struct Arguments { + + // + // Data members + // + + GemmUniversalMode mode; + GemmCoord problem_size; + int batch_count; // Either (mode == GemmUniversalMode::kBatched) the batch count, or (mode == GemmUniversalMode::kGemm) the tile-splitting factor + + typename EpilogueOutputOp::Params epilogue; + + void const * ptr_A; + void const * ptr_B; + void const * ptr_C; + void * ptr_D; + + int64_t batch_stride_A; + int64_t batch_stride_B; + int64_t batch_stride_C; + int64_t batch_stride_D; + + typename LayoutA::Stride stride_a; + typename LayoutB::Stride stride_b; + typename LayoutC::Stride stride_c; + typename LayoutC::Stride stride_d; + + typename LayoutA::Stride::LongIndex lda; + typename LayoutB::Stride::LongIndex ldb; + typename LayoutC::Stride::LongIndex ldc; + typename LayoutC::Stride::LongIndex ldd; + + int avail_sms; /// The number of SMs that StreamK dispatch heuristics will attempt to load-balance across (-1 
defaults to device width, 1 implies classic data-parallel scheduling) + + + // + // Methods + // + + /// Default Constructor + Arguments(): + mode(GemmUniversalMode::kGemm), + batch_count(1), + ptr_A(nullptr), + ptr_B(nullptr), + ptr_C(nullptr), + ptr_D(nullptr), + avail_sms(-1) + {} + + /// Constructor + Arguments( + GemmUniversalMode mode, + GemmCoord problem_size, + int batch_split, /// Either (mode == GemmUniversalMode::kBatched) the batch count, or (mode == GemmUniversalMode::kGemm) the tile-splitting factor (1 defaults to StreamK, >1 emulates Split-K) + typename EpilogueOutputOp::Params epilogue, + void const * ptr_A, + void const * ptr_B, + void const * ptr_C, + void * ptr_D, + int64_t batch_stride_A, + int64_t batch_stride_B, + int64_t batch_stride_C, + int64_t batch_stride_D, + typename LayoutA::Stride stride_a, + typename LayoutB::Stride stride_b, + typename LayoutC::Stride stride_c, + typename LayoutC::Stride stride_d, + int avail_sms = -1 /// The number of SMs that StreamK dispatch heuristics will attempt to load-balance across (-1 defaults to device width, 1 implies classic data-parallel scheduling) + ): + mode(mode), + problem_size(problem_size), + batch_count(batch_split), + epilogue(epilogue), + ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D), + batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C), batch_stride_D(batch_stride_D), + stride_a(stride_a), stride_b(stride_b), stride_c(stride_c), stride_d(stride_d), avail_sms(avail_sms) + { + CUTLASS_TRACE_HOST("GemmUniversalStreamk::Arguments::Arguments() - problem_size: " << problem_size); + } + + /// Constructor + Arguments( + GemmUniversalMode mode, + GemmCoord problem_size, + int batch_split, /// Either (mode == GemmUniversalMode::kBatched) the batch count, or (mode == GemmUniversalMode::kGemm) the tile-splitting factor (1 defaults to StreamK, >1 emulates Split-K) + typename EpilogueOutputOp::Params epilogue, + void const * ptr_A, + void const * ptr_B, + void const * ptr_C, + void * ptr_D, + int64_t batch_stride_A, + int64_t batch_stride_B, + int64_t batch_stride_C, + int64_t batch_stride_D, + typename LayoutA::Stride::LongIndex lda, + typename LayoutB::Stride::LongIndex ldb, + typename LayoutC::Stride::LongIndex ldc, + typename LayoutC::Stride::LongIndex ldd, + int avail_sms = -1 /// The number of SMs that StreamK dispatch heuristics will attempt to load-balance across (-1 defaults to device width, 1 implies classic data-parallel scheduling) + ): + mode(mode), + problem_size(problem_size), + batch_count(batch_split), + epilogue(epilogue), + ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D), + batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C), batch_stride_D(batch_stride_D), + lda(lda), ldb(ldb), ldc(ldc), ldd(ldd), avail_sms(avail_sms) + { + stride_a = make_Coord(lda); + stride_b = make_Coord(ldb); + stride_c = make_Coord(ldc); + stride_d = make_Coord(ldd); + CUTLASS_TRACE_HOST("GemmUniversalStreamk::Arguments::Arguments() - problem_size: " << problem_size); + } + + /// Returns arguments for the transposed problem + Arguments transposed_problem() const + { + Arguments args(*this); + + std::swap(args.problem_size.m(), args.problem_size.n()); + std::swap(args.ptr_A, args.ptr_B); + std::swap(args.lda, args.ldb); + std::swap(args.stride_a, args.stride_b); + std::swap(args.batch_stride_A, args.batch_stride_B); + + return args; + } + }; + + + /// Parameters structure + struct Params + { + public: + + // + // Data members + // + + 
void * ptr_A; + void * ptr_B; + + typename Mma::IteratorA::Params params_A; + typename Mma::IteratorB::Params params_B; + + int64_t batch_stride_A; + int64_t batch_stride_B; + + GemmUniversalMode mode; + + ThreadblockSwizzle block_mapping; + + void *barrier_workspace; + void *partials_workspace; + + typename EpilogueOutputOp::Params output_op; + + void * ptr_D; + void * ptr_C; + + typename Epilogue::OutputTileIterator::Params params_D; + typename Epilogue::OutputTileIterator::Params params_C; + + int64_t batch_stride_D; + int64_t batch_stride_C; + + + protected: + + // + // Host-only dispatch-utilities + // + + /// Pad the given allocation size up to the nearest cache line + static size_t cacheline_align_up(size_t size) + { + static const int CACHELINE_SIZE = 128; + return (size + CACHELINE_SIZE - 1) / CACHELINE_SIZE * CACHELINE_SIZE; + } + + /// Get the workspace size needed for barrier + size_t get_barrier_workspace_size() const + { + // For atomic reduction, each SK-block needs a synchronization flag. For parallel reduction, + // each reduction block needs its own synchronization flag. + int sk_blocks = block_mapping.sk_regions() * block_mapping.sk_blocks_per_region(); + int num_flags = fast_max(sk_blocks, block_mapping.reduction_blocks); + + return cacheline_align_up(sizeof(typename Barrier::T) * num_flags); + } + + /// Get the workspace size needed for intermediate partial sums + size_t get_partials_workspace_size() const + { + int sk_blocks = block_mapping.sk_regions() * block_mapping.sk_blocks_per_region(); + return cacheline_align_up(kWorkspaceBytesPerBlock * sk_blocks); + } + + + public: + + // + // Host dispatch API + // + + /// Default constructor + Params() = default; + + + /// Constructor + Params( + Arguments const &args, /// GEMM application arguments + int device_sms, /// Number of SMs on the device + int sm_occupancy) /// Kernel SM occupancy (in thread blocks) + : + params_A(args.lda ? make_Coord_with_padding(args.lda) : args.stride_a), + params_B(args.ldb ? make_Coord_with_padding(args.ldb) : args.stride_b), + params_C(args.ldc ? make_Coord_with_padding(args.ldc) : args.stride_c), + params_D(args.ldd ? make_Coord_with_padding(args.ldd) : args.stride_d), + output_op(args.epilogue), + mode(args.mode), + ptr_A(const_cast(args.ptr_A)), + ptr_B(const_cast(args.ptr_B)), + ptr_C(const_cast(args.ptr_C)), + ptr_D(args.ptr_D), + batch_stride_A(args.batch_stride_A), + batch_stride_B(args.batch_stride_B), + batch_stride_C(args.batch_stride_C), + batch_stride_D(args.batch_stride_D), + barrier_workspace(nullptr), + partials_workspace(nullptr) + { + // Number of SMs to make available for StreamK decomposition + int avail_sms = (args.avail_sms == -1) ? + device_sms : + fast_min(args.avail_sms, device_sms); + + // Initialize the block mapping structure + block_mapping = ThreadblockSwizzle( + typename ThreadblockSwizzle::template KernelTraits(), + args.mode, + args.problem_size, + {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK}, + args.batch_count, + sm_occupancy, + device_sms, + avail_sms); + } + + + /// Returns the workspace size (in bytes) needed for these parameters + size_t get_workspace_size() const + { + return + get_barrier_workspace_size() + + get_partials_workspace_size(); + } + + + /// Assign and initialize the specified workspace buffer. Assumes + /// the memory allocated to workspace is at least as large as get_workspace_size(). 
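+    /// (Editor's note, not upstream CUTLASS: the layout assumed below is the partials region
+    ///  first, then the barrier region, each rounded up to a 128-byte cache line by
+    ///  cacheline_align_up(). For example, with 3 SK blocks and an assumed
+    ///  kWorkspaceBytesPerBlock of 4096, the partials region is 3 * 4096 = 12288 bytes and
+    ///  the barrier flags follow it; only the barrier region is zero-initialized.)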
+ Status init_workspace( + void *workspace, + cudaStream_t stream = nullptr) + { + uint8_t *ptr = static_cast(workspace); + + // Establish partials workspace + partials_workspace = nullptr; + size_t partials_workspace_bytes = get_partials_workspace_size(); + if (partials_workspace_bytes > 0) + { + if (!workspace) { + return Status::kErrorWorkspaceNull; + } + partials_workspace = ptr; + ptr += partials_workspace_bytes; + } + + // Establish barrier workspace + barrier_workspace = nullptr; + size_t barrier_workspace_bytes = get_barrier_workspace_size(); + if (barrier_workspace_bytes > 0) + { + if (!workspace) { + return Status::kErrorWorkspaceNull; + } + barrier_workspace = ptr; + ptr += barrier_workspace_bytes; + } + + // Zero-initialize barrier workspace + if (barrier_workspace) + { + size_t barrier_workspace_bytes = get_barrier_workspace_size(); + + CUTLASS_TRACE_HOST(" Initialize " << barrier_workspace_bytes << " barrier bytes"); + + cudaError_t result = cudaMemsetAsync( + barrier_workspace, + 0, + barrier_workspace_bytes, + stream); + + if (result != cudaSuccess) { + CUTLASS_TRACE_HOST(" cudaMemsetAsync() returned error " << cudaGetErrorString(result)); + return Status::kErrorInternal; + } + } + + return Status::kSuccess; + } + + + /// Returns the GEMM volume in thread block tiles + cutlass::gemm::GemmCoord get_tiled_shape() const + { + return block_mapping.tiled_shape(); + } + + + /// Returns the total number of thread blocks to launch + int get_grid_blocks() const + { + dim3 grid_dims = get_grid_dims(); + return grid_dims.x * grid_dims.y * grid_dims.z; + } + + + /// Returns the grid extents in thread blocks to launch + dim3 get_grid_dims() const + { + return block_mapping.get_grid_dims(); + } + + + /// Lightweight update given a subset of arguments. 
+ void update(Arguments const &args) + { + CUTLASS_TRACE_HOST("GemmUniversalStreamK::Params::update()"); + + // Update input/output pointers + ptr_A = const_cast(args.ptr_A); + ptr_B = const_cast(args.ptr_B); + ptr_C = const_cast(args.ptr_C); + ptr_D = args.ptr_D; + + batch_stride_A = args.batch_stride_A; + batch_stride_B = args.batch_stride_B; + batch_stride_C = args.batch_stride_C; + batch_stride_D = args.batch_stride_D; + + output_op = args.epilogue; + } + + }; + + /// Tile work descriptor + struct TileWorkDesc + { + /// The linear tile index + int tile_idx; + + /// The location of this tile (in threadblock-tile coordinates) in the output matrix + cutlass::gemm::GemmCoord tiled_coord; + + // The first global-scoped MAC-iteration this threadblock will perform for this tile + int iter_begin; + + // The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile + int k_begin; + + // The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile + int k_end; + + /// The number of remaining MAC-iterations this threadblock will perform for this tile + int k_iters_remaining; + + // Whether this block will perform the first iteration of this tile + CUTLASS_DEVICE + bool tile_started() + { + return (k_begin == 0); + } + + // Whether this block will perform the last iteration of this tile + CUTLASS_DEVICE + bool tile_finished(Params const ¶ms) + { + return (k_end == params.block_mapping.problem_size.k()); + } + }; + + + /// Shared memory storage structure + union SharedStorage + { + typename Mma::SharedStorage main_loop; + typename Epilogue::SharedStorage epilogue; + }; + + +protected: + + // + // Data members + // + + /// GEMM problem parameters + Params params; + + /// Shared storage reference + SharedStorage &shared_storage; + + /// ID within the threadblock + int thread_idx; + + /// ID of warp + int warp_idx; + + /// ID of each thread within a warp + int lane_idx; + + /// Threadblock scoped epilogue + Epilogue epilogue; + + +public: + + // + // Host-only dispatch API + // + + /// Determines whether the GEMM problem size satisfies this kernel's + /// alignment requirements + static Status can_implement( + cutlass::gemm::GemmCoord const & problem_size) + { + CUTLASS_TRACE_HOST("GemmUniversalStreamk::can_implement()"); + + static int const kAlignmentA = (platform::is_same>::value) + ? 32 + : (platform::is_same>::value) + ? 64 + : Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = (platform::is_same>::value) + ? 32 + : (platform::is_same>::value) + ? 64 + : Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = (platform::is_same>::value) + ? 32 + : (platform::is_same>::value) + ? 
64 + : Epilogue::OutputTileIterator::kElementsPerAccess; + + bool isAMisaligned = false; + bool isBMisaligned = false; + bool isCMisaligned = false; + + if (platform::is_same::value) { + isAMisaligned = problem_size.k() % kAlignmentA; + } else if (platform::is_same::value) { + isAMisaligned = problem_size.m() % kAlignmentA; + } else if (platform::is_same>::value + || platform::is_same>::value) { + isAMisaligned = problem_size.k() % kAlignmentA; + } + + if (platform::is_same::value) { + isBMisaligned = problem_size.n() % kAlignmentB; + } else if (platform::is_same::value) { + isBMisaligned = problem_size.k() % kAlignmentB; + } else if (platform::is_same>::value + || platform::is_same>::value) { + isBMisaligned = problem_size.k() % kAlignmentB; + } + + if (platform::is_same::value) { + isCMisaligned = problem_size.n() % kAlignmentC; + } else if (platform::is_same::value) { + isCMisaligned = problem_size.m() % kAlignmentC; + } else if (platform::is_same>::value + || platform::is_same>::value) { + isCMisaligned = problem_size.n() % kAlignmentC; + } + + if (isAMisaligned) { + CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for A operand"); + return Status::kErrorMisalignedOperand; + } + + if (isBMisaligned) { + CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for B operand"); + return Status::kErrorMisalignedOperand; + } + + if (isCMisaligned) { + CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for C operand"); + return Status::kErrorMisalignedOperand; + } + + CUTLASS_TRACE_HOST(" returning kSuccess"); + + return Status::kSuccess; + } + + /// Determines whether the GEMM problem satisfies this kernel's + /// alignment requirements + static Status can_implement(Arguments const &args) { + return can_implement(args.problem_size); + } + +protected: + + // + // Device-only utility methods + // + + /// Iterator for fetching tile fragments from A + CUTLASS_DEVICE + typename Mma::IteratorA init_iterator_A( + TileWorkDesc &tile_work, + GemmUniversalMode mode) + { + // The input A matrix + ElementA *ptr_A = static_cast(params.ptr_A); + + // Update input pointers based on batched/array mode + if (mode == GemmUniversalMode::kBatched) { + ptr_A += tile_work.tiled_coord.k() * params.batch_stride_A; + } + if (mode == GemmUniversalMode::kArray) { + ptr_A = static_cast(params.ptr_A)[tile_work.tiled_coord.k()]; + } + + int m_begin = tile_work.tiled_coord.m() * Mma::Shape::kM; + int m_end = params.block_mapping.problem_size.m(); + return Mma::IteratorA( + params.params_A, + ptr_A, + { m_end, tile_work.k_end }, + threadIdx.x, + { m_begin, tile_work.k_begin }); + + } + + + /// Iterator for fetching tile fragments from B + CUTLASS_DEVICE + typename Mma::IteratorB init_iterator_B( + TileWorkDesc &tile_work, + GemmUniversalMode mode) + { + // The input B matrix + ElementB *ptr_B = static_cast(params.ptr_B); + + // Update input pointers based on batched/array mode + if (mode == GemmUniversalMode::kBatched) { + ptr_B += tile_work.tiled_coord.k() * params.batch_stride_B; + } + if (mode == GemmUniversalMode::kArray) { + ptr_B = static_cast(params.ptr_B)[tile_work.tiled_coord.k()]; + } + + int n_begin = tile_work.tiled_coord.n() * Mma::Shape::kN; + int n_end = params.block_mapping.problem_size.n(); + return Mma::IteratorB( + params.params_B, + ptr_B, + { tile_work.k_end, n_end }, + threadIdx.x, + { tile_work.k_begin, n_begin }); + } + + + CUTLASS_DEVICE + void init_dp_tile_work( + TileWorkDesc &tile_work, + int tile_idx) + { + // The linear tile index + tile_work.tile_idx = tile_idx; + + // The first 
global-scoped MAC-iteration this threadblock will perform for this tile + tile_work.iter_begin = tile_idx * params.block_mapping.iters_per_tile(); + + // The number of MAC-iterations this threadblock will perform for this tile + tile_work.k_iters_remaining = params.block_mapping.iters_per_tile(); + + // The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile + tile_work.k_begin = 0; + + // The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile + tile_work.k_end = params.block_mapping.problem_size.k(); + + // The location of this tile (in threadblock-tile coordinates) in the output matrix + tile_work.tiled_coord = params.block_mapping.get_tile_offset(tile_work.tile_idx); + } + + + CUTLASS_DEVICE + void init_sk_tile_work( + TileWorkDesc &tile_work, + int tile_idx, + int block_iter_begin, + int block_iter_end) + { + // The linear tile index + tile_work.tile_idx = tile_idx; + + // The first global-scoped MAC-iteration for this tile + int tile_iter_begin = tile_idx * params.block_mapping.iters_per_tile(); + + // The first global-scoped MAC-iteration this threadblock will perform for this tile + tile_work.iter_begin = max(block_iter_begin, tile_iter_begin); + + // The first tile-scoped MAC-iteration this threadblock will perform for this tile + int k_iter_begin = tile_work.iter_begin - tile_iter_begin; + + // The last (one past) tile-scoped MAC-iteration this threadblock will perform for this tile + int k_iter_end = block_iter_end - tile_iter_begin; + + // The number of MAC-iterations this threadblock will perform for this tile + tile_work.k_iters_remaining = k_iter_end - k_iter_begin; + + // The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile + tile_work.k_begin = k_iter_begin * Mma::Shape::kK; + + // The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile + tile_work.k_end = min( + params.block_mapping.problem_size.k(), // extent of k domain + (k_iter_end * Mma::Shape::kK)); // extent of the threadblock's global iteration assignment + + // The location of this tile (in threadblock-tile coordinates) in the output matrix + tile_work.tiled_coord = params.block_mapping.get_tile_offset(tile_work.tile_idx); + } + + + /// Share accumulators with peers + CUTLASS_DEVICE + void share_accumulators( + AccumulatorTile const &accumulator_tile, + int block_idx, + int first_block_idx) + { + AccumulatorTile *accum_tile_workspace = reinterpret_cast(params.partials_workspace); + + int accum_tile_offset = first_block_idx * kThreadCount; + + if (block_idx == first_block_idx) + { + // First peer initializes the workspace partials + BlockStripedReduceT::store(accum_tile_workspace + accum_tile_offset, accumulator_tile, thread_idx); + } + else + { + // Subsequent peers atomically accumulate into the workspace partials + if (ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kAtomic) + { + // Non-deterministic reduction order: wait for the first peer to have initialized the partials before we add to them + Barrier::wait_lt(params.barrier_workspace, thread_idx, first_block_idx, 1); + } + else + { + // Turnstile reduction order: wait until the previous peer has written + int wait_count = block_idx - first_block_idx; + Barrier::wait_eq(params.barrier_workspace, thread_idx, first_block_idx, wait_count); + } + + // Perform reduction in workspace + BlockStripedReduceT::reduce(accum_tile_workspace + accum_tile_offset, 
accumulator_tile, thread_idx); + } + + // Signal our arrival + Barrier::arrive_inc(params.barrier_workspace, thread_idx, first_block_idx); + } + + + /// Acquire accumulators from peers + CUTLASS_DEVICE + void acquire_accumulators( + AccumulatorTile &accumulator_tile, + int block_idx, + int first_block_idx) + { + AccumulatorTile *accum_tile_workspace = reinterpret_cast(params.partials_workspace); + + // Wait for arrival + int num_carry_in = block_idx - first_block_idx; + Barrier::wait_eq_reset(params.barrier_workspace, thread_idx, first_block_idx, num_carry_in); + + // Load and add peer-partials accumulator tile to local accumulator tile + int accum_tile_offset = first_block_idx * kThreadCount; + BlockStripedReduceT::load_add(accumulator_tile, accum_tile_workspace + accum_tile_offset, thread_idx); + } + + + /// Perform epilogue computations and output + CUTLASS_DEVICE + void do_epilogue( + TileWorkDesc &tile_work, + AccumulatorTile &accumulator_tile) + { + ElementC *ptr_C = static_cast(params.ptr_C); + ElementC *ptr_D = static_cast(params.ptr_D); + + // Update pointers for batched/array mode(s) + if (params.mode == GemmUniversalMode::kBatched) { + ptr_C += tile_work.tiled_coord.k() * params.batch_stride_C; + ptr_D += tile_work.tiled_coord.k() * params.batch_stride_D; + } + if (params.mode == GemmUniversalMode::kArray) { + ptr_C = static_cast(params.ptr_C)[tile_work.tiled_coord.k()]; + ptr_D = static_cast(params.ptr_D)[tile_work.tiled_coord.k()]; + } + + // Location of this tile in item-coords + MatrixCoord threadblock_item_begin( + tile_work.tiled_coord.m() * Mma::Shape::kM, + tile_work.tiled_coord.n() * Mma::Shape::kN + ); + + // Tile iterator loading from source tensor. + typename Epilogue::OutputTileIterator iterator_C( + params.params_C, + ptr_C, + params.block_mapping.problem_size.mn(), + thread_idx, + threadblock_item_begin); + + // Tile iterator writing to destination tensor. + typename Epilogue::OutputTileIterator iterator_D( + params.params_D, + ptr_D, + params.block_mapping.problem_size.mn(), + thread_idx, + threadblock_item_begin); + + // Execute the epilogue operator to update the destination tensor. 
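+      // Argument order below is (output op, destination iterator, accumulators, source iterator).
+      // With the usual LinearCombination output op this is, per element, roughly the sketch
+      //   D = alpha * accum + beta * C.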
+ epilogue( + EpilogueOutputOp(params.output_op), + iterator_D, + accumulator_tile, + iterator_C); + } + + + CUTLASS_DEVICE + void separate_reduction(int reduce_idx) + { + int peer_idx_begin, peer_idx_last, reduce_tile_idx, reduce_fragment_idx; + + // Reduce by sk-tile (every tile contributed to by one or more blocks) + reduce_tile_idx = reduce_idx / Epilogue::kAccumulatorFragments; + reduce_fragment_idx = reduce_idx % Epilogue::kAccumulatorFragments; + + int iter_tile_first = reduce_tile_idx * params.block_mapping.iters_per_tile(); + int iter_tile_last = iter_tile_first + params.block_mapping.iters_per_tile() - 1; + + peer_idx_begin = params.block_mapping.get_sk_block_idx(iter_tile_first); + peer_idx_last = params.block_mapping.get_sk_block_idx(iter_tile_last); + + // Wait for peers to complete + int peer_idx_end = peer_idx_last + 1; + int num_peers = peer_idx_end - peer_idx_begin; + Barrier::wait_eq_reset( + params.barrier_workspace, + thread_idx, + (reduce_tile_idx * Epilogue::kAccumulatorFragments) + reduce_fragment_idx, + num_peers); + + /// The location of this tile (in threadblock-tile coordinates) in the output matrix + GemmCoord tiled_coord = params.block_mapping.get_tile_offset(reduce_tile_idx); + + // Location of this tile in item-coords + MatrixCoord threadblock_item_begin( + tiled_coord.m() * Mma::Shape::kM, + tiled_coord.n() * Mma::Shape::kN + ); + + ElementC *ptr_C = static_cast(params.ptr_C); + ElementC *ptr_D = static_cast(params.ptr_D); + + // Tile iterator loading from source tensor. + typename Epilogue::OutputTileIterator iterator_C( + params.params_C, + ptr_C, + params.block_mapping.problem_size.mn(), + thread_idx, + threadblock_item_begin); + + // Tile iterator writing to destination tensor. + typename Epilogue::OutputTileIterator iterator_D( + params.params_D, + ptr_D, + params.block_mapping.problem_size.mn(), + thread_idx, + threadblock_item_begin); + + // Execute the epilogue operator to update the destination tensor. 
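+    // Barrier bookkeeping recap: SK blocks in process_tile() call Barrier::arrive_range_inc()
+    // on the Epilogue::kAccumulatorFragments flags belonging to their tile, so the
+    // wait_eq_reset() above used flag index
+    //   reduce_tile_idx * Epilogue::kAccumulatorFragments + reduce_fragment_idx.
+    // For example, if kAccumulatorFragments were 4, reduce_idx == 9 would map to
+    // reduce_tile_idx == 2 and reduce_fragment_idx == 1.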
+ epilogue.reduce( + peer_idx_begin, + peer_idx_end, + reduce_fragment_idx, + params.partials_workspace, + EpilogueOutputOp(params.output_op), + iterator_D, + iterator_C); + } + + + CUTLASS_DEVICE + void process_tile( + TileWorkDesc tile_work, + int block_idx, + int dp_start_block_idx, + int block_iter_begin) + { + // Initialize input iterators + typename Mma::IteratorA iterator_A = init_iterator_A(tile_work, params.mode); + typename Mma::IteratorB iterator_B = init_iterator_B(tile_work, params.mode); + + // Initialize accumulators + AccumulatorTile accumulator_tile; + accumulator_tile.clear(); + + // Initialize MMA abstraction + Mma mma( + shared_storage.main_loop, + thread_idx, + warp_idx, + lane_idx); + + // Perform this tile's range of multiply-accumulate (MAC) iterations + mma(tile_work.k_iters_remaining, accumulator_tile, iterator_A, iterator_B, accumulator_tile); + + if ((ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kAtomic) || + (params.block_mapping.reduction_blocks == 0) || + (block_idx >= dp_start_block_idx)) + { + // + // Cooperative SK peer reduction or DP block + // + + int first_block_idx = params.block_mapping.get_first_block_idx(tile_work.tile_idx, block_idx); + + if (!tile_work.tile_finished(params)) { + // Non "finishing" SK blocks must share their partial accumulator sums through global scratch workspace + share_accumulators(accumulator_tile, block_idx, first_block_idx); + } + else + { + // DP blocks and "finishing" SK blocks must perform epilogue operations and write the output tile + if (!tile_work.tile_started()) + { + // A "finishing" SK block must first aggregate its accumulator partial sums with those shared by peer threadblocks + acquire_accumulators(accumulator_tile, block_idx, first_block_idx); + } + + do_epilogue(tile_work, accumulator_tile); + } + } + else + { + // + // Separate peer reduction + // + + // Share accumulator partial sums with peer threadblock(s) through scratch workspace + epilogue.share(block_idx, params.partials_workspace, accumulator_tile, tile_work.tile_started()); + + // Signal arrival + Barrier::arrive_range_inc( + params.barrier_workspace, + thread_idx, + tile_work.tile_idx * Epilogue::kAccumulatorFragments, + Epilogue::kAccumulatorFragments); + } + } + + + /// Executes one GEMM + CUTLASS_DEVICE + void gemm() + { + // Initialize block's iteration range + int tile_idx = 0; + int block_iter_begin = 0; + int block_iters_remaining = 0; + + int block_idx = params.block_mapping.get_block_idx(); + + int sk_padding_start_block_idx = params.block_mapping.sk_regions() * params.block_mapping.sk_blocks_per_region(); + int dp_start_block_idx = params.block_mapping.sk_waves * params.block_mapping.avail_sms; + int reduce_start_block_idx = dp_start_block_idx + params.block_mapping.dp_blocks; + int grid_padding_start_block_idx = reduce_start_block_idx + params.block_mapping.reduction_blocks; + + // Initialize tile work descriptor + TileWorkDesc tile_work; + + bool dp_block = (block_idx >= dp_start_block_idx) && (block_idx < reduce_start_block_idx); + bool sk_block = (block_idx < sk_padding_start_block_idx); + bool reduce_block = (block_idx >= reduce_start_block_idx) && + (block_idx < grid_padding_start_block_idx) && + (ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kMixed); + + if (dp_block) + { + // This is a DP block + int dp_block_idx = block_idx - dp_start_block_idx; + int first_dp_tile = (params.block_mapping.cohort_raster) ? 
0 : params.block_mapping.sk_tiles; + + // Blocks in first DP wave get configured number of tiles + tile_idx = first_dp_tile + dp_block_idx; + int tile_allottment = params.block_mapping.dp_first_wave_tiles; + + // Blocks in subsequent DP waves get 1 tile + if (dp_block_idx >= params.block_mapping.avail_sms) { + tile_allottment = 1; + tile_idx += (params.block_mapping.dp_first_wave_tiles - 1) * params.block_mapping.avail_sms; + } + + block_iters_remaining = params.block_mapping.iters_per_tile() * tile_allottment; + + init_dp_tile_work(tile_work, tile_idx); + + // DP blocks exit if out of bounds or overlap an SK tile (only possible during cohort rasterization, where dp_first_wave_tiles must be 1) + if ((tile_idx < params.block_mapping.sk_tiles) || + (tile_work.tiled_coord.m() >= params.block_mapping.tiled_shape().m()) || + (tile_work.tiled_coord.n() >= params.block_mapping.tiled_shape().n())) + { + return; + } + } + else if (sk_block) + { + // This is a SK block + int block_iter_end; + params.block_mapping.get_iter_extents(block_idx, block_iter_begin, block_iter_end); + block_iters_remaining = block_iter_end - block_iter_begin; + + tile_idx = params.block_mapping.get_sk_tile_idx(block_iter_end - 1); + init_sk_tile_work(tile_work, tile_idx, block_iter_begin, block_iter_begin + block_iters_remaining); + } + else + { + if (reduce_block) + { + // This is a reduction threadblock + int reduce_block_idx = block_idx - reduce_start_block_idx; + separate_reduction(reduce_block_idx); + } + + return; + } + + // Iteration-processing loop body + CUTLASS_PRAGMA_NO_UNROLL + while (true) + { + // Perform this block's share of work for this tile + process_tile( + tile_work, + block_idx, + dp_start_block_idx, + block_iter_begin); + + block_iters_remaining -= tile_work.k_iters_remaining; + + if (block_iters_remaining == 0) + { + break; + } + + // Continue to next tile + __syncthreads(); + + if (block_idx >= dp_start_block_idx) + { + // DP block consume their tiles at stride + tile_idx += params.block_mapping.avail_sms; + init_dp_tile_work(tile_work, tile_idx); + } + else + { + // SK blocks consume their tiles in backwards order + tile_idx--; + init_sk_tile_work(tile_work, tile_idx, block_iter_begin, block_iter_begin + block_iters_remaining); + } + } + + } + + +public: + + // + // Device-only API + // + + // Factory invocation + CUTLASS_DEVICE + static void invoke( + Params const ¶ms, + SharedStorage &shared_storage) + { + GemmUniversalStreamk op(params, shared_storage); + op(); + } + + + // Constructor + CUTLASS_DEVICE + GemmUniversalStreamk( + Params const ¶ms, + SharedStorage &shared_storage) + : + params(params), + shared_storage(shared_storage), + thread_idx(threadIdx.x), + warp_idx(__shfl_sync(0xffffffff, threadIdx.x / 32, 0)), // broadcast the warp_id computed by lane 0 to ensure dependent code + lane_idx(threadIdx.x % 32), + epilogue( + shared_storage.epilogue, + thread_idx, + warp_idx, + lane_idx) + {} + + + /// Executes one GEMM + CUTLASS_DEVICE + void operator()() + { + // Generic SK code path + gemm(); + + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_universal_with_visitor.h 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_universal_with_visitor.h new file mode 100644 index 0000000000000000000000000000000000000000..2b9f04fdf15a2b6e2657e393b6e817cb3e2fff89 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_universal_with_visitor.h @@ -0,0 +1,321 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief Gemm kernel with an epilogue defined under the epilogue visitor concept +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/kernel/gemm_universal.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +// Gemm that compute the epilogue visitor functor +template < + typename Mma, ///! Threadblock-scoped matrix multiply-accumulate + typename Epilogue, ///! Epilogue + typename ThreadblockSwizzle_ ///! 
Threadblock swizzling function +> +class GemmWithEpilogueVisitor: GemmUniversal { +public: + + using ThreadblockSwizzle = ThreadblockSwizzle_; + + using Base = GemmUniversal; + using Base::Base; + + using FusionCallbacks = typename Epilogue::FusionCallbacks; + + using ElementA = typename Base::ElementA; + using LayoutA = typename Base::LayoutA; + using ElementB = typename Base::ElementB; + using LayoutB = typename Base::LayoutB; + using ElementC = typename Base::ElementC; + using LayoutC = typename Base::LayoutC; + + using ThreadblockShape = typename Mma::Shape; + + // + // Structures + // + + using SharedStorage = typename Base::SharedStorage; + using Arguments = typename Base::Arguments; + + // + // Structure for precomputing values in host memory and passing to kernels + // + + /// Parameters structure + struct Params : UniversalParamsBase< + ThreadblockSwizzle, + ThreadblockShape, + ElementA, + ElementB, + ElementC, + LayoutA, + LayoutB> + { + using ParamsBase = UniversalParamsBase< + ThreadblockSwizzle, + ThreadblockShape, + ElementA, + ElementB, + ElementC, + LayoutA, + LayoutB>; + + // + // Data members + // + cute::Shape problem_shape; + + typename Mma::IteratorA::Params params_A; + typename Mma::IteratorB::Params params_B; + typename FusionCallbacks::Params output_op; + + void * ptr_A; + void * ptr_B; + + int64_t batch_stride_A; + int64_t batch_stride_B; + + int * ptr_gather_A_indices; + int * ptr_gather_B_indices; + + // + // Host dispatch API + // + + /// Default constructor + Params() = default; + + /// Constructor + Params( + Arguments const &args, /// GEMM application arguments + int device_sms, /// Number of SMs on the device + int sm_occupancy) /// Kernel SM occupancy (in thread blocks) + : + ParamsBase(args, device_sms, sm_occupancy), + params_A(args.lda ? make_Coord_with_padding(args.lda) : args.stride_a), + params_B(args.ldb ? make_Coord_with_padding(args.ldb) : args.stride_b), + output_op(FusionCallbacks::to_underlying_arguments(args.problem_size, args.epilogue, nullptr /*workspace*/)), + problem_shape({args.problem_size.m(), args.problem_size.n(), args.batch_count}), + ptr_A(const_cast(args.ptr_A)), + ptr_B(const_cast(args.ptr_B)), + batch_stride_A(args.batch_stride_A), + batch_stride_B(args.batch_stride_B), + ptr_gather_A_indices(const_cast(args.ptr_gather_A_indices)), + ptr_gather_B_indices(const_cast(args.ptr_gather_B_indices)) + { + // Raise error on unsupported modes + assert(args.mode != GemmUniversalMode::kGemmSplitKParallel && "Sm80 EVT does not support SplitKParallel."); + assert(!(args.mode == GemmUniversalMode::kGemm && this->grid_tiled_shape.k() > 1 ) + && "Sm80 EVT does not support SplitKSerial."); + assert(args.mode != GemmUniversalMode::kArray && "Sm80 EVT does not support Array Gemm."); + } + + /// Lightweight update given a subset of arguments. 
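+    // Typical host-side use of update(): re-point an already-constructed Params at new
+    // tensors between launches without rebuilding the full Params (sketch; `new_args` is a
+    // hypothetical Arguments instance with a compatible problem size):
+    //
+    //   params.update(new_args);  // refreshes operand pointers, batch strides, gather indices,
+    //                             // and the fusion callback arguments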
+ void update(Arguments const &args) + { + CUTLASS_TRACE_HOST("GemmUniversalwithVisitor::Params::update()"); + + // Update input pointers + ptr_A = const_cast(args.ptr_A); + ptr_B = const_cast(args.ptr_B); + + batch_stride_A = args.batch_stride_A; + batch_stride_B = args.batch_stride_B; + this->batch_stride_D = args.batch_stride_D; + + ptr_gather_A_indices = const_cast(args.ptr_gather_A_indices); + ptr_gather_B_indices = const_cast(args.ptr_gather_B_indices); + + output_op = FusionCallbacks::to_underlying_arguments(args.problem_size, args.epilogue, nullptr /*workspace*/); + problem_shape = make_shape(args.problem_size.m(), args.problem_size.n(), args.batch_count); + } + }; + +public: + + // + // Device-only API + // + + // Factory invocation + CUTLASS_DEVICE + static void invoke( + Params const ¶ms, + SharedStorage &shared_storage) + { + GemmWithEpilogueVisitor op; + op(params, shared_storage); + } + + + /// Executes one GEMM + CUTLASS_DEVICE + void operator()(Params const ¶ms, SharedStorage &shared_storage) { + ThreadblockSwizzle threadblock_swizzle; + run_with_swizzle(params, shared_storage, threadblock_swizzle); + } + + /// Executes one GEMM with an externally-provided swizzling function + CUTLASS_DEVICE + void run_with_swizzle(Params const ¶ms, SharedStorage &shared_storage, ThreadblockSwizzle& threadblock_swizzle) { + + cutlass::gemm::GemmCoord threadblock_tile_offset = + threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + // Early exit if CTA is out of range + if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() || + params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { + + return; + } + + int offset_k = 0; + int problem_size_k = params.problem_size.k(); + + ElementA *ptr_A = static_cast(params.ptr_A); + ElementB *ptr_B = static_cast(params.ptr_B); + + // + // Fetch pointers based on mode. + // + if (params.mode == GemmUniversalMode::kGemm) { + + if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) { + + problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size; + } + + offset_k = threadblock_tile_offset.k() * params.gemm_k_size; + } + else if (params.mode == GemmUniversalMode::kBatched) { + ptr_A += threadblock_tile_offset.k() * params.batch_stride_A; + ptr_B += threadblock_tile_offset.k() * params.batch_stride_B; + } + + __syncthreads(); + + // Compute initial location in logical coordinates + cutlass::MatrixCoord tb_offset_A{ + threadblock_tile_offset.m() * Mma::Shape::kM, + offset_k, + }; + + cutlass::MatrixCoord tb_offset_B{ + offset_k, + threadblock_tile_offset.n() * Mma::Shape::kN + }; + + // Compute position within threadblock + int thread_idx = threadIdx.x; + + // Construct iterators to A and B operands + typename Mma::IteratorA iterator_A( + params.params_A, + ptr_A, + {params.problem_size.m(), problem_size_k}, + thread_idx, + tb_offset_A, + params.ptr_gather_A_indices); + + typename Mma::IteratorB iterator_B( + params.params_B, + ptr_B, + {problem_size_k, params.problem_size.n()}, + thread_idx, + tb_offset_B, + params.ptr_gather_B_indices); + + // Broadcast the warp_id computed by lane 0 to ensure dependent code + // is compiled as warp-uniform. 
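+    // canonical_warp_idx_sync() yields the same warp-uniform warp id as the explicit
+    // broadcast used by the stream-K kernel constructors in this header family, i.e. roughly:
+    //   int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);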
+ int warp_idx = canonical_warp_idx_sync(); + + int lane_idx = threadIdx.x % 32; + + // + // Main loop + // + + // Construct thread-scoped matrix multiply + Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); + + typename Mma::FragmentC accumulators; + + accumulators.clear(); + + // Compute threadblock-scoped matrix multiply-add + int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK; + + // Compute threadblock-scoped matrix multiply-add + mma( + gemm_k_iterations, + accumulators, + iterator_A, + iterator_B, + accumulators); + + // + // Epilogue + // + + threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + Epilogue epilogue( + params.output_op, + shared_storage.epilogue, + thread_idx, + warp_idx, + lane_idx); + + // Execute the epilogue operator to update the destination tensor. + epilogue(accumulators, threadblock_tile_offset, params.problem_shape, thread_idx); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_universal_with_visitor_streamk.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_universal_with_visitor_streamk.h new file mode 100644 index 0000000000000000000000000000000000000000..dd2c52f46b48dc516e20ebb345c01b598930a504 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_universal_with_visitor_streamk.h @@ -0,0 +1,892 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ + +/*! \file + \brief Gemm kernel with an epilogue defined under the epilogue visitor concept with streamk. +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/fast_math.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/complex.h" +#include "cutlass/barrier.h" +#include "cutlass/block_striped.h" + +#include "cutlass/trace.h" +#include "cutlass/gemm/kernel/gemm_universal_streamk.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate + typename Epilogue_, ///! Epilogue + typename ThreadblockSwizzle_ ///! Threadblock mapping function +> +class GemmWithEpilogueVisitorStreamk { +public: + + using Base = GemmUniversalStreamk; + + // + // Types and constants + // + + using Mma = Mma_; + using Epilogue = Epilogue_; + using FusionCallbacks = typename Epilogue::FusionCallbacks; + using EpilogueOutputOp = typename Epilogue::OutputOp; + using ThreadblockSwizzle = ThreadblockSwizzle_; + + using ElementA = typename Mma::IteratorA::Element; + using LayoutA = typename Mma::IteratorA::Layout; + using ElementB = typename Mma::IteratorB::Element; + using LayoutB = typename Mma::IteratorB::Layout; + using ElementC = typename Epilogue::OutputTileIterator::Element; + using LayoutC = typename Epilogue::OutputTileIterator::Layout; + + /// The per-thread tile of raw accumulators + using AccumulatorTile = typename Mma::FragmentC; + + static ComplexTransform const kTransformA = Mma::kTransformA; + static ComplexTransform const kTransformB = Mma::kTransformB; + using Operator = typename Mma::Operator; + + using OperatorClass = typename Mma::Operator::OperatorClass; + using ThreadblockShape = typename Mma::Shape; + using WarpShape = typename Mma::Operator::Shape; + using InstructionShape = typename Mma::Policy::Operator::InstructionShape; + using ArchTag = typename Mma::ArchTag; + + static int const kStages = Mma::kStages; + static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + + /// Warp count (concept: GemmShape) + using WarpCount = typename Mma::WarpCount; + static int const kThreadCount = 32 * WarpCount::kCount; + + /// Workspace bytes per thread block + static size_t const kWorkspaceBytesPerBlock = + __NV_STD_MAX( + kThreadCount * sizeof(AccumulatorTile), + Epilogue::kWorkspaceBytesPerBlock); + + /// Block-striped reduction utility + using BlockStripedReduceT = BlockStripedReduce; + + + + // + // Structures + // + + using Arguments = typename Base::Arguments; + + + /// Parameters structure + struct Params + { + public: + + // + // Data members + // + cute::Shape problem_shape; + + void * ptr_A; + void * ptr_B; + + typename Mma::IteratorA::Params params_A; + typename Mma::IteratorB::Params params_B; + + int64_t batch_stride_A; + int64_t batch_stride_B; + + GemmUniversalMode mode; + + ThreadblockSwizzle block_mapping; + + void *barrier_workspace; + void *partials_workspace; + + typename FusionCallbacks::Params output_op; + + + void * ptr_D; + void * ptr_C; + + typename 
Epilogue::OutputTileIterator::Params params_D; + typename Epilogue::OutputTileIterator::Params params_C; + + int64_t batch_stride_D; + int64_t batch_stride_C; + + + protected: + + // + // Host-only dispatch-utilities + // + + /// Pad the given allocation size up to the nearest cache line + static size_t cacheline_align_up(size_t size) + { + static const int CACHELINE_SIZE = 128; + return (size + CACHELINE_SIZE - 1) / CACHELINE_SIZE * CACHELINE_SIZE; + } + + /// Get the workspace size needed for barrier + size_t get_barrier_workspace_size() const + { + // For atomic reduction, each SK-block needs a synchronization flag. For parallel reduction, + // each reduction block needs its own synchronization flag. + int sk_blocks = block_mapping.sk_regions() * block_mapping.sk_blocks_per_region(); + int num_flags = fast_max(sk_blocks, block_mapping.reduction_blocks); + + return cacheline_align_up(sizeof(typename Barrier::T) * num_flags); + } + + /// Get the workspace size needed for intermediate partial sums + size_t get_partials_workspace_size() const + { + int sk_blocks = block_mapping.sk_regions() * block_mapping.sk_blocks_per_region(); + return cacheline_align_up(kWorkspaceBytesPerBlock * sk_blocks); + } + + + public: + + // + // Host dispatch API + // + + /// Default constructor + Params() = default; + + + /// Constructor + Params( + Arguments const &args, /// GEMM application arguments + int device_sms, /// Number of SMs on the device + int sm_occupancy) /// Kernel SM occupancy (in thread blocks) + : + problem_shape({args.problem_size.m(), args.problem_size.n(), args.batch_count}), + params_A(args.lda ? make_Coord_with_padding(args.lda) : args.stride_a), + params_B(args.ldb ? make_Coord_with_padding(args.ldb) : args.stride_b), + params_C(args.ldc ? make_Coord_with_padding(args.ldc) : args.stride_c), + params_D(args.ldd ? make_Coord_with_padding(args.ldd) : args.stride_d), + output_op(FusionCallbacks::to_underlying_arguments(args.problem_size, args.epilogue, nullptr /*workspace*/)), + mode(args.mode), + ptr_A(const_cast(args.ptr_A)), + ptr_B(const_cast(args.ptr_B)), + ptr_C(const_cast(args.ptr_C)), + ptr_D(args.ptr_D), + batch_stride_A(args.batch_stride_A), + batch_stride_B(args.batch_stride_B), + batch_stride_C(args.batch_stride_C), + batch_stride_D(args.batch_stride_D), + barrier_workspace(nullptr), + partials_workspace(nullptr) + { + // Number of SMs to make available for StreamK decomposition + int avail_sms = (args.avail_sms == -1) ? + device_sms : + fast_min(args.avail_sms, device_sms); + + // Initialize the block mapping structure + block_mapping = ThreadblockSwizzle( + typename ThreadblockSwizzle::template KernelTraits(), + args.mode, + args.problem_size, + {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK}, + args.batch_count, + sm_occupancy, + device_sms, + avail_sms); + } + + + /// Returns the workspace size (in bytes) needed for these parameters + size_t get_workspace_size() const + { + return + get_barrier_workspace_size() + + get_partials_workspace_size(); + } + + + /// Assign and initialize the specified workspace buffer. Assumes + /// the memory allocated to workspace is at least as large as get_workspace_size(). 
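+    // Workspace layout established by init_workspace() below: the partials region
+    // (kWorkspaceBytesPerBlock per SK block) comes first, followed by the barrier flags;
+    // both sizes are rounded up to a 128-byte cache line by cacheline_align_up().
+    // Minimal host-side sketch (variable names are illustrative only):
+    //
+    //   Params params(args, device_sms, sm_occupancy);
+    //   void *workspace = nullptr;
+    //   cudaMalloc(&workspace, params.get_workspace_size());
+    //   Status status = params.init_workspace(workspace, stream);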
+ Status init_workspace( + void *workspace, + cudaStream_t stream = nullptr) + { + uint8_t *ptr = static_cast(workspace); + + // Establish partials workspace + partials_workspace = nullptr; + size_t partials_workspace_bytes = get_partials_workspace_size(); + if (partials_workspace_bytes > 0) + { + if (!workspace) { + return Status::kErrorWorkspaceNull; + } + partials_workspace = ptr; + ptr += partials_workspace_bytes; + } + + // Establish barrier workspace + barrier_workspace = nullptr; + size_t barrier_workspace_bytes = get_barrier_workspace_size(); + if (barrier_workspace_bytes > 0) + { + if (!workspace) { + return Status::kErrorWorkspaceNull; + } + barrier_workspace = ptr; + ptr += barrier_workspace_bytes; + } + + // Zero-initialize barrier workspace + if (barrier_workspace) + { + size_t barrier_workspace_bytes = get_barrier_workspace_size(); + + CUTLASS_TRACE_HOST(" Initialize " << barrier_workspace_bytes << " barrier bytes"); + + cudaError_t result = cudaMemsetAsync( + barrier_workspace, + 0, + barrier_workspace_bytes, + stream); + + if (result != cudaSuccess) { + CUTLASS_TRACE_HOST(" cudaMemsetAsync() returned error " << cudaGetErrorString(result)); + return Status::kErrorInternal; + } + } + + return Status::kSuccess; + } + + + /// Returns the GEMM volume in thread block tiles + cutlass::gemm::GemmCoord get_tiled_shape() const + { + return block_mapping.tiled_shape(); + } + + + /// Returns the total number of thread blocks to launch + int get_grid_blocks() const + { + dim3 grid_dims = get_grid_dims(); + return grid_dims.x * grid_dims.y * grid_dims.z; + } + + + /// Returns the grid extents in thread blocks to launch + dim3 get_grid_dims() const + { + return block_mapping.get_grid_dims(); + } + + + /// Lightweight update given a subset of arguments. 
+ void update(Arguments const &args) + { + CUTLASS_TRACE_HOST("GemmUniversalStreamK::Params::update()"); + + // Update input/output pointers + ptr_A = const_cast(args.ptr_A); + ptr_B = const_cast(args.ptr_B); + ptr_C = const_cast(args.ptr_C); + ptr_D = args.ptr_D; + + batch_stride_A = args.batch_stride_A; + batch_stride_B = args.batch_stride_B; + batch_stride_C = args.batch_stride_C; + batch_stride_D = args.batch_stride_D; + + output_op = FusionCallbacks::to_underlying_arguments(args.problem_size, args.epilogue, nullptr /*workspace*/); + problem_shape = make_shape(args.problem_size.m(), args.problem_size.n(), args.batch_count); + } + + }; + + struct TileWorkDesc: Base::TileWorkDesc { + int k_end; + CUTLASS_DEVICE + bool tile_finished(Params const ¶ms) + { + return (k_end == params.block_mapping.problem_size.k()); + } + }; + + // using TileWorkDesc = typename Base::TileWorkDesc; + using SharedStorage = typename Base::SharedStorage; + +protected: + + // + // Data members + // + + /// GEMM problem parameters + Params params; + + /// Shared storage reference + SharedStorage &shared_storage; + + /// ID within the threadblock + int thread_idx; + + /// ID of warp + int warp_idx; + + /// ID of each thread within a warp + int lane_idx; + + /// Threadblock scoped epilogue + Epilogue epilogue; + + +public: + + // + // Host-only dispatch API + // + + /// Determines whether the GEMM problem size satisfies this kernel's + /// alignment requirements + static Status can_implement( + cutlass::gemm::GemmCoord const & problem_size) + { + return Base::can_implement(problem_size); + } + + /// Determines whether the GEMM problem satisfies this kernel's + /// alignment requirements + static Status can_implement(Arguments const &args) { + return can_implement(args.problem_size); + } + +protected: + + // + // Device-only utility methods + // + + /// Iterator for fetching tile fragments from A + CUTLASS_DEVICE + typename Mma::IteratorA init_iterator_A( + TileWorkDesc &tile_work, + GemmUniversalMode mode) + { + // The input A matrix + ElementA *ptr_A = static_cast(params.ptr_A); + + // Update input pointers based on batched/array mode + if (mode == GemmUniversalMode::kBatched) { + ptr_A += tile_work.tiled_coord.k() * params.batch_stride_A; + } + if (mode == GemmUniversalMode::kArray) { + ptr_A = static_cast(params.ptr_A)[tile_work.tiled_coord.k()]; + } + + int m_begin = tile_work.tiled_coord.m() * Mma::Shape::kM; + int m_end = params.block_mapping.problem_size.m(); + return Mma::IteratorA( + params.params_A, + ptr_A, + { m_end, tile_work.k_end }, + threadIdx.x, + { m_begin, tile_work.k_begin }); + + } + + + /// Iterator for fetching tile fragments from B + CUTLASS_DEVICE + typename Mma::IteratorB init_iterator_B( + TileWorkDesc &tile_work, + GemmUniversalMode mode) + { + // The input B matrix + ElementB *ptr_B = static_cast(params.ptr_B); + + // Update input pointers based on batched/array mode + if (mode == GemmUniversalMode::kBatched) { + ptr_B += tile_work.tiled_coord.k() * params.batch_stride_B; + } + if (mode == GemmUniversalMode::kArray) { + ptr_B = static_cast(params.ptr_B)[tile_work.tiled_coord.k()]; + } + + int n_begin = tile_work.tiled_coord.n() * Mma::Shape::kN; + int n_end = params.block_mapping.problem_size.n(); + return Mma::IteratorB( + params.params_B, + ptr_B, + { tile_work.k_end, n_end }, + threadIdx.x, + { tile_work.k_begin, n_begin }); + } + + + CUTLASS_DEVICE + void init_dp_tile_work( + TileWorkDesc &tile_work, + int tile_idx) + { + // The linear tile index + tile_work.tile_idx = tile_idx; + + 
// The first global-scoped MAC-iteration this threadblock will perform for this tile + tile_work.iter_begin = tile_idx * params.block_mapping.iters_per_tile(); + + // The number of MAC-iterations this threadblock will perform for this tile + tile_work.k_iters_remaining = params.block_mapping.iters_per_tile(); + + // The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile + tile_work.k_begin = 0; + + // The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile + tile_work.k_end = params.block_mapping.problem_size.k(); + + // The location of this tile (in threadblock-tile coordinates) in the output matrix + tile_work.tiled_coord = params.block_mapping.get_tile_offset(tile_work.tile_idx); + } + + + CUTLASS_DEVICE + void init_sk_tile_work( + TileWorkDesc &tile_work, + int tile_idx, + int block_iter_begin, + int block_iter_end) + { + // The linear tile index + tile_work.tile_idx = tile_idx; + + // The first global-scoped MAC-iteration for this tile + int tile_iter_begin = tile_idx * params.block_mapping.iters_per_tile(); + + // The first global-scoped MAC-iteration this threadblock will perform for this tile + tile_work.iter_begin = max(block_iter_begin, tile_iter_begin); + + // The first tile-scoped MAC-iteration this threadblock will perform for this tile + int k_iter_begin = tile_work.iter_begin - tile_iter_begin; + + // The last (one past) tile-scoped MAC-iteration this threadblock will perform for this tile + int k_iter_end = block_iter_end - tile_iter_begin; + + // The number of MAC-iterations this threadblock will perform for this tile + tile_work.k_iters_remaining = k_iter_end - k_iter_begin; + + // The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile + tile_work.k_begin = k_iter_begin * Mma::Shape::kK; + + // The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile + tile_work.k_end = min( + params.block_mapping.problem_size.k(), // extent of k domain + (k_iter_end * Mma::Shape::kK)); // extent of the threadblock's global iteration assignment + + // The location of this tile (in threadblock-tile coordinates) in the output matrix + tile_work.tiled_coord = params.block_mapping.get_tile_offset(tile_work.tile_idx); + } + + + /// Share accumulators with peers + CUTLASS_DEVICE + void share_accumulators( + AccumulatorTile const &accumulator_tile, + int block_idx, + int first_block_idx) + { + AccumulatorTile *accum_tile_workspace = reinterpret_cast(params.partials_workspace); + + int accum_tile_offset = first_block_idx * kThreadCount; + + if (block_idx == first_block_idx) + { + // First peer initializes the workspace partials + BlockStripedReduceT::store(accum_tile_workspace + accum_tile_offset, accumulator_tile, thread_idx); + } + else + { + // Subsequent peers atomically accumulate into the workspace partials + if (ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kAtomic) + { + // Non-deterministic reduction order: wait for the first peer to have initialized the partials before we add to them + Barrier::wait_lt(params.barrier_workspace, thread_idx, first_block_idx, 1); + } + else + { + // Turnstile reduction order: wait until the previous peer has written + int wait_count = block_idx - first_block_idx; + Barrier::wait_eq(params.barrier_workspace, thread_idx, first_block_idx, wait_count); + } + + // Perform reduction in workspace + BlockStripedReduceT::reduce(accum_tile_workspace + 
accum_tile_offset, accumulator_tile, thread_idx); + } + + // Signal our arrival + Barrier::arrive_inc(params.barrier_workspace, thread_idx, first_block_idx); + } + + + /// Acquire accumulators from peers + CUTLASS_DEVICE + void acquire_accumulators( + AccumulatorTile &accumulator_tile, + int block_idx, + int first_block_idx) + { + AccumulatorTile *accum_tile_workspace = reinterpret_cast(params.partials_workspace); + + // Wait for arrival + int num_carry_in = block_idx - first_block_idx; + Barrier::wait_eq_reset(params.barrier_workspace, thread_idx, first_block_idx, num_carry_in); + + // Load and add peer-partials accumulator tile to local accumulator tile + int accum_tile_offset = first_block_idx * kThreadCount; + BlockStripedReduceT::load_add(accumulator_tile, accum_tile_workspace + accum_tile_offset, thread_idx); + } + + + /// Perform epilogue computations and output + CUTLASS_DEVICE + void do_epilogue( + TileWorkDesc &tile_work, + AccumulatorTile &accumulator_tile) + { + cutlass::gemm::GemmCoord threadblock_tile_offset{ + tile_work.tiled_coord.m(), + tile_work.tiled_coord.n(), + tile_work.tiled_coord.k() + }; + + // Execute the epilogue operator to update the destination tensor. + epilogue( + accumulator_tile, + threadblock_tile_offset, + params.problem_shape, + thread_idx); + } + + + CUTLASS_DEVICE + void separate_reduction(int reduce_idx) + { + int peer_idx_begin, peer_idx_last, reduce_tile_idx, reduce_fragment_idx; + + // Reduce by sk-tile (every tile contributed to by one or more blocks) + reduce_tile_idx = reduce_idx / Epilogue::kAccumulatorFragments; + reduce_fragment_idx = reduce_idx % Epilogue::kAccumulatorFragments; + + int iter_tile_first = reduce_tile_idx * params.block_mapping.iters_per_tile(); + int iter_tile_last = iter_tile_first + params.block_mapping.iters_per_tile() - 1; + + peer_idx_begin = params.block_mapping.get_sk_block_idx(iter_tile_first); + peer_idx_last = params.block_mapping.get_sk_block_idx(iter_tile_last); + + // Wait for peers to complete + int peer_idx_end = peer_idx_last + 1; + int num_peers = peer_idx_end - peer_idx_begin; + Barrier::wait_eq_reset( + params.barrier_workspace, + thread_idx, + (reduce_tile_idx * Epilogue::kAccumulatorFragments) + reduce_fragment_idx, + num_peers); + + /// The location of this tile (in threadblock-tile coordinates) in the output matrix + GemmCoord tiled_coord = params.block_mapping.get_tile_offset(reduce_tile_idx); + + // Execute the epilogue operator to update the destination tensor. 
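+    // Unlike the non-visitor GemmUniversalStreamk::separate_reduction(), which hands prebuilt
+    // OutputTileIterators to epilogue.reduce(), this visitor-based variant passes the tile
+    // coordinate and problem shape so the epilogue can derive output addressing internally.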
+ epilogue.reduce( + peer_idx_begin, + peer_idx_end, + reduce_fragment_idx, + params.partials_workspace, + tiled_coord, + params.problem_shape, + thread_idx); + } + + + CUTLASS_DEVICE + void process_tile( + TileWorkDesc tile_work, + int block_idx, + int dp_start_block_idx, + int block_iter_begin) + { + // Initialize input iterators + typename Mma::IteratorA iterator_A = init_iterator_A(tile_work, params.mode); + typename Mma::IteratorB iterator_B = init_iterator_B(tile_work, params.mode); + + // Initialize accumulators + AccumulatorTile accumulator_tile; + accumulator_tile.clear(); + + // Initialize MMA abstraction + Mma mma( + shared_storage.main_loop, + thread_idx, + warp_idx, + lane_idx); + + // Perform this tile's range of multiply-accumulate (MAC) iterations + mma(tile_work.k_iters_remaining, accumulator_tile, iterator_A, iterator_B, accumulator_tile); + + if ((ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kAtomic) || + (params.block_mapping.reduction_blocks == 0) || + (block_idx >= dp_start_block_idx)) + { + // + // Cooperative SK peer reduction or DP block + // + + int first_block_idx = params.block_mapping.get_first_block_idx(tile_work.tile_idx, block_idx); + + if (!tile_work.tile_finished(params)) { + // Non "finishing" SK blocks must share their partial accumulator sums through global scratch workspace + share_accumulators(accumulator_tile, block_idx, first_block_idx); + } + else + { + // DP blocks and "finishing" SK blocks must perform epilogue operations and write the output tile + if (!tile_work.tile_started()) + { + // A "finishing" SK block must first aggregate its accumulator partial sums with those shared by peer threadblocks + acquire_accumulators(accumulator_tile, block_idx, first_block_idx); + } + + do_epilogue(tile_work, accumulator_tile); + } + } + else + { + // + // Separate peer reduction + // + + // Share accumulator partial sums with peer threadblock(s) through scratch workspace + epilogue.share(block_idx, params.partials_workspace, accumulator_tile, tile_work.tile_started()); + + // Signal arrival + Barrier::arrive_range_inc( + params.barrier_workspace, + thread_idx, + tile_work.tile_idx * Epilogue::kAccumulatorFragments, + Epilogue::kAccumulatorFragments); + } + } + + + /// Executes one GEMM + CUTLASS_DEVICE + void gemm() + { + // Initialize block's iteration range + int tile_idx = 0; + int block_iter_begin = 0; + int block_iters_remaining = 0; + + int block_idx = params.block_mapping.get_block_idx(); + + int sk_padding_start_block_idx = params.block_mapping.sk_regions() * params.block_mapping.sk_blocks_per_region(); + int dp_start_block_idx = params.block_mapping.sk_waves * params.block_mapping.avail_sms; + int reduce_start_block_idx = dp_start_block_idx + params.block_mapping.dp_blocks; + int grid_padding_start_block_idx = reduce_start_block_idx + params.block_mapping.reduction_blocks; + + // Initialize tile work descriptor + TileWorkDesc tile_work; + + bool dp_block = (block_idx >= dp_start_block_idx) && (block_idx < reduce_start_block_idx); + bool sk_block = (block_idx < sk_padding_start_block_idx); + bool reduce_block = (block_idx >= reduce_start_block_idx) && + (block_idx < grid_padding_start_block_idx) && + (ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kMixed); + + if (dp_block) + { + // This is a DP block + int dp_block_idx = block_idx - dp_start_block_idx; + int first_dp_tile = (params.block_mapping.cohort_raster) ? 
0 : params.block_mapping.sk_tiles; + + // Blocks in first DP wave get configured number of tiles + tile_idx = first_dp_tile + dp_block_idx; + int tile_allottment = params.block_mapping.dp_first_wave_tiles; + + // Blocks in subsequent DP waves get 1 tile + if (dp_block_idx >= params.block_mapping.avail_sms) { + tile_allottment = 1; + tile_idx += (params.block_mapping.dp_first_wave_tiles - 1) * params.block_mapping.avail_sms; + } + + block_iters_remaining = params.block_mapping.iters_per_tile() * tile_allottment; + + init_dp_tile_work(tile_work, tile_idx); + + // DP blocks exit if out of bounds or overlap an SK tile (only possible during cohort rasterization, where dp_first_wave_tiles must be 1) + if ((tile_idx < params.block_mapping.sk_tiles) || + (tile_work.tiled_coord.m() >= params.block_mapping.tiled_shape().m()) || + (tile_work.tiled_coord.n() >= params.block_mapping.tiled_shape().n())) + { + return; + } + } + else if (sk_block) + { + // This is a SK block + int block_iter_end; + params.block_mapping.get_iter_extents(block_idx, block_iter_begin, block_iter_end); + block_iters_remaining = block_iter_end - block_iter_begin; + + tile_idx = params.block_mapping.get_sk_tile_idx(block_iter_end - 1); + init_sk_tile_work(tile_work, tile_idx, block_iter_begin, block_iter_begin + block_iters_remaining); + } + else + { + if (reduce_block) + { + // This is a reduction threadblock + int reduce_block_idx = block_idx - reduce_start_block_idx; + separate_reduction(reduce_block_idx); + } + + return; + } + + // Iteration-processing loop body + CUTLASS_PRAGMA_NO_UNROLL + while (true) + { + // Perform this block's share of work for this tile + process_tile( + tile_work, + block_idx, + dp_start_block_idx, + block_iter_begin); + + block_iters_remaining -= tile_work.k_iters_remaining; + + if (block_iters_remaining == 0) + { + break; + } + + // Continue to next tile + __syncthreads(); + + if (block_idx >= dp_start_block_idx) + { + // DP block consume their tiles at stride + tile_idx += params.block_mapping.avail_sms; + init_dp_tile_work(tile_work, tile_idx); + } + else + { + // SK blocks consume their tiles in backwards order + tile_idx--; + init_sk_tile_work(tile_work, tile_idx, block_iter_begin, block_iter_begin + block_iters_remaining); + } + } + + } + + +public: + + // + // Device-only API + // + + // Factory invocation + CUTLASS_DEVICE + static void invoke( + Params const ¶ms, + SharedStorage &shared_storage) + { + GemmWithEpilogueVisitorStreamk op(params, shared_storage); + op(); + } + + + CUTLASS_DEVICE + GemmWithEpilogueVisitorStreamk( + Params const ¶ms, + SharedStorage &shared_storage) + : + params(params), + shared_storage(shared_storage), + thread_idx(threadIdx.x), + warp_idx(__shfl_sync(0xffffffff, threadIdx.x / 32, 0)), // broadcast the warp_id computed by lane 0 to ensure dependent code + lane_idx(threadIdx.x % 32), + epilogue( + params.output_op, + shared_storage.epilogue, + thread_idx, + warp_idx, + lane_idx) + {} + + + /// Executes one GEMM + CUTLASS_DEVICE + void operator()() + { + // Generic SK code path + gemm(); + + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_with_fused_epilogue.h 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_with_fused_epilogue.h new file mode 100644 index 0000000000000000000000000000000000000000..1c58b44ef45e34fc7a0be872792bbaa38f0daedf --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_with_fused_epilogue.h @@ -0,0 +1,1508 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Gemm kernel with fused reduction operation. +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/fast_math.h" +#include "cutlass/layout/layout.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/complex.h" +#include "cutlass/semaphore.h" +#include "cutlass/gemm/kernel/params_universal_base.h" + +#include "cutlass/trace.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate + typename Epilogue_, ///! Epilogue + typename ThreadblockSwizzle_, ///! Threadblock swizzling function + bool IsSingleSource = Epilogue_::kIsSingleSource +> +struct GemmWithFusedEpilogue; + +// GemmWithFusedEpilogue with two sources +template < + typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate + typename Epilogue_, ///! Epilogue + typename ThreadblockSwizzle_ ///! 
Threadblock swizzling function +> +struct GemmWithFusedEpilogue { +public: + + using Mma = Mma_; + using Epilogue = Epilogue_; + using EpilogueOutputOp = typename Epilogue::OutputOp; + using ThreadblockSwizzle = ThreadblockSwizzle_; + + using ElementA = typename Mma::IteratorA::Element; + using LayoutA = typename Mma::IteratorA::Layout; + using ElementB = typename Mma::IteratorB::Element; + using LayoutB = typename Mma::IteratorB::Layout; + using ElementC = typename Epilogue::OutputTileIterator::Element; + using LayoutC = typename Epilogue::OutputTileIterator::Layout; + + static ComplexTransform const kTransformA = Mma::kTransformA; + static ComplexTransform const kTransformB = Mma::kTransformB; + using Operator = typename Mma::Operator; + + using OperatorClass = typename Mma::Operator::OperatorClass; + using ThreadblockShape = typename Mma::Shape; + using WarpShape = typename Mma::Operator::Shape; + using InstructionShape = typename Mma::Policy::Operator::InstructionShape; + using ArchTag = typename Mma::ArchTag; + + static int const kStages = Mma::kStages; + static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + + /// Warp count (concept: GemmShape) + using WarpCount = typename Mma::WarpCount; + static int const kThreadCount = 32 * WarpCount::kCount; + + /// Split-K preserves splits that are 128b aligned + static int const kSplitKAlignment = const_max( + 128 / sizeof_bits::value, + 128 / sizeof_bits::value + ); + + // + // Structures + // + + /// Argument structure + struct Arguments : UniversalArgumentsBase{ + + // + // Data members + // + + typename EpilogueOutputOp::Params epilogue; + + void const * ptr_A; + void const * ptr_B; + void const * ptr_C1; + void const * ptr_C2; + void * ptr_D; + + void * ptr_Vector; + void * ptr_Tensor; + + int64_t batch_stride_A; + int64_t batch_stride_B; + int64_t batch_stride_C1; + int64_t batch_stride_C2; + int64_t batch_stride_Vector; + int64_t batch_stride_Tensor; + + typename LayoutA::Stride::Index lda; + typename LayoutB::Stride::Index ldb; + typename LayoutC::Stride::Index ldc1; + typename LayoutC::Stride::Index ldc2; + typename LayoutC::Stride::Index ldd; + typename LayoutC::Stride::Index ldr; + typename LayoutC::Stride::Index ldt; + + // + // Methods + // + + Arguments(): + ptr_A(nullptr), + ptr_B(nullptr), + ptr_C1(nullptr), + ptr_C2(nullptr), + ptr_D(nullptr) + {} + + /// constructs an arguments structure + Arguments( + GemmUniversalMode mode, + GemmCoord problem_size, + int batch_count, + typename EpilogueOutputOp::Params epilogue, + void const * ptr_A, + void const * ptr_B, + void const * ptr_C1, + void const * ptr_C2, + void * ptr_D, + void * ptr_Vector, + void * ptr_Tensor, + int64_t batch_stride_A, + int64_t batch_stride_B, + int64_t batch_stride_C1, + int64_t batch_stride_C2, + int64_t batch_stride_D, + int64_t batch_stride_Vector, + int64_t batch_stride_Tensor, + typename LayoutA::Stride::Index lda, + typename LayoutB::Stride::Index ldb, + typename LayoutC::Stride::Index ldc1, + typename LayoutC::Stride::Index ldc2, + typename LayoutC::Stride::Index ldd, + typename LayoutC::Stride::Index ldr, + typename LayoutC::Stride::Index ldt) + : + UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D), + epilogue(epilogue), + ptr_A(ptr_A), ptr_B(ptr_B), ptr_C1(ptr_C1), ptr_C2(ptr_C2), ptr_D(ptr_D), + ptr_Vector(ptr_Vector), + ptr_Tensor(ptr_Tensor), + 
batch_stride_A(batch_stride_A), + batch_stride_B(batch_stride_B), + batch_stride_C1(batch_stride_C1), + batch_stride_C2(batch_stride_C2), + batch_stride_Vector(batch_stride_Vector), + batch_stride_Tensor(batch_stride_Tensor), + lda(lda), ldb(ldb), ldc1(ldc1), ldc2(ldc2), ldd(ldd), ldr(ldr), ldt(ldt) + { + CUTLASS_TRACE_HOST("GemmWithFusedEpilogue::Arguments::Arguments() - problem_size: " << problem_size); + CUTLASS_TRACE_HOST(" ptr_Vector: " << (void *)this->ptr_Vector); + CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor); + CUTLASS_TRACE_HOST(" ldr: " << this->ldr); + CUTLASS_TRACE_HOST(" ldt: " << this->ldt); + } + + /// Returns arguments for the transposed problem + Arguments transposed_problem() const { + Arguments args(*this); + + std::swap(args.problem_size.m(), args.problem_size.n()); + std::swap(args.ptr_A, args.ptr_B); + std::swap(args.lda, args.ldb); + std::swap(args.batch_stride_A, args.batch_stride_B); + + return args; + } + }; + + + // + // Structure for precomputing values in host memory and passing to kernels + // + + /// Parameters structure + struct Params : UniversalParamsBase< + ThreadblockSwizzle, + ThreadblockShape, + ElementA, + ElementB, + ElementC, + LayoutA, + LayoutB> + { + using ParamsBase = UniversalParamsBase< + ThreadblockSwizzle, + ThreadblockShape, + ElementA, + ElementB, + ElementC, + LayoutA, + LayoutB>; + + // + // Data members + // + + typename Mma::IteratorA::Params params_A; + typename Mma::IteratorB::Params params_B; + typename Epilogue::OutputTileIterator::Params params_C1; + typename Epilogue::OutputTileIterator::Params params_C2; + typename Epilogue::OutputTileIterator::Params params_D; + typename Epilogue::TensorTileIterator::Params params_Tensor; + typename EpilogueOutputOp::Params output_op; + + void * ptr_A; + void * ptr_B; + void * ptr_C1; + void * ptr_C2; + void * ptr_D; + + void * ptr_Vector; + typename LayoutC::Stride::Index ldr; + + void * ptr_Tensor; + + int64_t batch_stride_A; + int64_t batch_stride_B; + int64_t batch_stride_C1; + int64_t batch_stride_C2; + int64_t batch_stride_Vector; + int64_t batch_stride_Tensor; + + // + // Host dispatch API + // + + /// Default constructor + Params() = default; + + /// Constructor + Params( + Arguments const &args, /// GEMM application arguments + int device_sms, /// Number of SMs on the device + int sm_occupancy) /// Kernel SM occupancy (in thread blocks) + : + ParamsBase(args, device_sms, sm_occupancy), + params_A(args.lda), + params_B(args.ldb), + params_C1(args.ldc1), + params_C2(args.ldc2), + params_D(args.ldd), + params_Tensor(args.ldt), + output_op(args.epilogue), + ptr_A(const_cast(args.ptr_A)), + ptr_B(const_cast(args.ptr_B)), + ptr_C1(const_cast(args.ptr_C1)), + ptr_C2(const_cast(args.ptr_C2)), + ptr_D(args.ptr_D), + ptr_Vector(args.ptr_Vector), + ldr(args.ldr), + ptr_Tensor(args.ptr_Tensor), + batch_stride_A(args.batch_stride_A), + batch_stride_B(args.batch_stride_B), + batch_stride_C1(args.batch_stride_C1), + batch_stride_C2(args.batch_stride_C2), + batch_stride_Vector(args.batch_stride_Vector), + batch_stride_Tensor(args.batch_stride_Tensor) + { + CUTLASS_TRACE_HOST("GemmWithFusedEpilogue::Params::Params() - problem_size: " << problem_size); + CUTLASS_TRACE_HOST(" ptr_Vector: " << (void *)this->ptr_Vector); + CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor); + CUTLASS_TRACE_HOST(" ldr: " << this->ldr); + CUTLASS_TRACE_HOST(" ldt: " << args.ldt); + } + + /// Lightweight update given a subset of arguments. 
+ CUTLASS_HOST_DEVICE + void update(Arguments const &args) + { + ptr_A = const_cast(args.ptr_A); + ptr_B = const_cast(args.ptr_B); + ptr_C1 = const_cast(args.ptr_C1); + ptr_C2 = const_cast(args.ptr_C2); + ptr_D = args.ptr_D; + + ptr_Vector = args.ptr_Vector; + ldr = args.ldr; + ptr_Tensor = args.ptr_Tensor; + + batch_stride_A = args.batch_stride_A; + batch_stride_B = args.batch_stride_B; + batch_stride_C1 = args.batch_stride_C1; + batch_stride_C2 = args.batch_stride_C2; + batch_stride_Vector = args.batch_stride_Vector; + batch_stride_Tensor = args.batch_stride_Tensor; + this->batch_stride_D = args.batch_stride_D; + + output_op = args.epilogue; + + CUTLASS_TRACE_HOST("GemmWithFusedEpilogue::Params::update()"); + CUTLASS_TRACE_HOST(" ptr_Vector: " << (void *)this->ptr_Vector); + CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor); + CUTLASS_TRACE_HOST(" ldr: " << this->ldr); + } + }; + + + /// Shared memory storage structure + union SharedStorage { + typename Mma::SharedStorage main_loop; + typename Epilogue::SharedStorage epilogue; + }; + +public: + + // + // Host dispatch API + // + + /// Determines whether kernel satisfies alignment + static Status can_implement( + cutlass::gemm::GemmCoord const & problem_size) { + + CUTLASS_TRACE_HOST("GemmWithFusedEpilogue::can_implement()"); + + static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + + bool isAMisaligned = false; + bool isBMisaligned = false; + bool isCMisaligned = false; + + if (platform::is_same::value) { + isAMisaligned = problem_size.k() % kAlignmentA; + } else if (platform::is_same::value) { + isAMisaligned = problem_size.m() % kAlignmentA; + } else if (platform::is_same>::value + || platform::is_same>::value) { + isAMisaligned = problem_size.k() % kAlignmentA; + } + + if (platform::is_same::value) { + isBMisaligned = problem_size.n() % kAlignmentB; + } else if (platform::is_same::value) { + isBMisaligned = problem_size.k() % kAlignmentB; + } else if (platform::is_same>::value + || platform::is_same>::value) { + isBMisaligned = problem_size.k() % kAlignmentB; + } + + if (platform::is_same::value) { + isCMisaligned = problem_size.n() % kAlignmentC; + } else if (platform::is_same::value) { + isCMisaligned = problem_size.m() % kAlignmentC; + } else if (platform::is_same>::value + || platform::is_same>::value) { + isCMisaligned = problem_size.n() % kAlignmentC; + } + + if (isAMisaligned) { + CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for A operand"); + return Status::kErrorMisalignedOperand; + } + + if (isBMisaligned) { + CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for B operand"); + return Status::kErrorMisalignedOperand; + } + + if (isCMisaligned) { + CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for C operand"); + return Status::kErrorMisalignedOperand; + } + + CUTLASS_TRACE_HOST(" returning kSuccess"); + + return Status::kSuccess; + } + + static Status can_implement(Arguments const &args) { + return can_implement(args.problem_size); + } + +public: + + // + // Device-only API + // + + // Factory invocation + CUTLASS_DEVICE + static void invoke( + Params const ¶ms, + SharedStorage &shared_storage) + { + GemmWithFusedEpilogue op; + op(params, shared_storage); + } + + #define SPLIT_K_ENABLED 1 + + /// Executes one GEMM + CUTLASS_DEVICE + void operator()(Params const ¶ms, SharedStorage &shared_storage) { + + // Compute 
threadblock location + ThreadblockSwizzle threadblock_swizzle; + + cutlass::gemm::GemmCoord threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + // Early exit if CTA is out of range + if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() || + params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { + + return; + } + + int offset_k = 0; + int problem_size_k = params.problem_size.k(); + + ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A); + ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B); + + + #if SPLIT_K_ENABLED + // + // Fetch pointers based on mode. + // + if (params.mode == GemmUniversalMode::kGemm || + params.mode == GemmUniversalMode::kGemmSplitKParallel) { + + if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) { + + problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size; + } + + offset_k = threadblock_tile_offset.k() * params.gemm_k_size; + } + else if (params.mode == GemmUniversalMode::kBatched) { + ptr_A += threadblock_tile_offset.k() * params.batch_stride_A; + ptr_B += threadblock_tile_offset.k() * params.batch_stride_B; + } + else if (params.mode == GemmUniversalMode::kArray) { + ptr_A = static_cast<ElementA * const *>(params.ptr_A)[threadblock_tile_offset.k()]; + ptr_B = static_cast<ElementB * const *>(params.ptr_B)[threadblock_tile_offset.k()]; + } + #endif + + // Compute initial location in logical coordinates + cutlass::MatrixCoord tb_offset_A{ + threadblock_tile_offset.m() * Mma::Shape::kM, + offset_k, + }; + + cutlass::MatrixCoord tb_offset_B{ + offset_k, + threadblock_tile_offset.n() * Mma::Shape::kN + }; + + // Compute position within threadblock + int thread_idx = threadIdx.x; + + // Construct iterators to A and B operands + typename Mma::IteratorA iterator_A( + params.params_A, + ptr_A, + {params.problem_size.m(), problem_size_k}, + thread_idx, + tb_offset_A); + + typename Mma::IteratorB iterator_B( + params.params_B, + ptr_B, + {problem_size_k, params.problem_size.n()}, + thread_idx, + tb_offset_B); + + // Broadcast the warp_id computed by lane 0 to ensure dependent code + // is compiled as warp-uniform.
+ int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0); + + int lane_idx = threadIdx.x % 32; + + // + // Main loop + // + + // Construct thread-scoped matrix multiply + Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); + + typename Mma::FragmentC accumulators; + + accumulators.clear(); + + // Compute threadblock-scoped matrix multiply-add + int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK; + + // Compute threadblock-scoped matrix multiply-add + mma( + gemm_k_iterations, + accumulators, + iterator_A, + iterator_B, + accumulators); + + // + // Epilogue + // + + EpilogueOutputOp output_op(params.output_op); + + // + // Masked tile iterators constructed from members + // + + threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + //assume identity swizzle + MatrixCoord threadblock_offset( + threadblock_tile_offset.m() * Mma::Shape::kM, + threadblock_tile_offset.n() * Mma::Shape::kN + ); + + int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m(); + + ElementC *ptr_C1 = static_cast(params.ptr_C1); + ElementC *ptr_C2 = static_cast(params.ptr_C2); + ElementC *ptr_D = static_cast(params.ptr_D); + typename Epilogue::ElementTensor *ptr_Tensor = static_cast(params.ptr_Tensor); + + // Define the reduction output pointer and move to the appropriate place + typename Epilogue::ElementVector *ptr_Vector = + static_cast(params.ptr_Vector); + + // + // Fetch pointers based on mode. + // + + // + // Special path when split-K not enabled. + // + + if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() == 1) { + + // Tile iterators loading from source tensors. + typename Epilogue::OutputTileIterator iterator_C1( + params.params_C1, + ptr_C1, + params.problem_size.mn(), + thread_idx, + threadblock_offset + ); + + typename Epilogue::OutputTileIterator iterator_C2( + params.params_C2, + ptr_C2, + params.problem_size.mn(), + thread_idx, + threadblock_offset + ); + + // Tile iterator writing to destination tensor. + typename Epilogue::OutputTileIterator iterator_D( + params.params_D, + ptr_D, + params.problem_size.mn(), + thread_idx, + threadblock_offset + ); + + // Additional tensor to load from + typename Epilogue::TensorTileIterator tensor_iterator( + params.params_Tensor, + // Only the final block outputs Tensor + ptr_Tensor, + params.problem_size.mn(), + thread_idx, + threadblock_offset); + + // Construct the epilogue + Epilogue epilogue( + shared_storage.epilogue, + thread_idx, + warp_idx, + lane_idx); + + // Move to appropriate location for this output tile + if (ptr_Vector) { + ptr_Vector += threadblock_offset.column() + threadblock_tile_offset.m() * params.ldr; + } + + // Execute the epilogue operator to update the destination tensor. + epilogue(output_op, + ptr_Vector, + iterator_D, + accumulators, + iterator_C1, + iterator_C2, + tensor_iterator, + params.problem_size.mn(), + threadblock_offset); + + return; + } + + // + // Slower path when split-K or batching is needed + // + + + #if SPLIT_K_ENABLED + // Construct the semaphore. + Semaphore semaphore(params.semaphore + block_idx, thread_idx); + + if (params.mode == GemmUniversalMode::kGemm) { + + // If performing a reduction via split-K, fetch the initial synchronization + if (params.grid_tiled_shape.k() > 1) { + + // Fetch the synchronization lock initially but do not block. 
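+ // Serial split-K: each k-partition accumulates only its slice of the K dimension, and this per-tile semaphore orders the epilogues so that partition k reads the previous partition's partial result from D (iterator_C1 is redirected to iterator_D below) before adding its own contribution.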
+ semaphore.fetch(); + + // Indicate which position in a serial reduction the output operator is currently updating + output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k()); + } + } + else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) { + ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; + } + else if (params.mode == GemmUniversalMode::kBatched) { + ptr_C1 += threadblock_tile_offset.k() * params.batch_stride_C1; + if (ptr_C2) { + ptr_C2 += threadblock_tile_offset.k() * params.batch_stride_C2; + } + ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; + if (ptr_Tensor) { + ptr_Tensor += threadblock_tile_offset.k() * params.batch_stride_Tensor; + } + if (ptr_Vector) { + ptr_Vector += threadblock_tile_offset.k() * params.batch_stride_Vector; + } + } + else if (params.mode == GemmUniversalMode::kArray) { + ptr_C1 = static_cast(params.ptr_C1)[threadblock_tile_offset.k()]; + if (ptr_C2) { + ptr_C2 = static_cast(params.ptr_C2)[threadblock_tile_offset.k()]; + } + ptr_D = static_cast(params.ptr_D)[threadblock_tile_offset.k()]; + if (ptr_Tensor) { + ptr_Tensor = static_cast(params.ptr_Tensor)[threadblock_tile_offset.k()]; + } + if (ptr_Vector) { + ptr_Vector = static_cast(params.ptr_Vector)[threadblock_tile_offset.k()]; + } + } + #endif + + // Tile iterators loading from source tensors. + typename Epilogue::OutputTileIterator iterator_C1( + params.params_C1, + ptr_C1, + params.problem_size.mn(), + thread_idx, + threadblock_offset + ); + + typename Epilogue::OutputTileIterator iterator_C2( + params.params_C2, + ptr_C2, + params.problem_size.mn(), + thread_idx, + threadblock_offset + ); + + // Tile iterator writing to destination tensor. + typename Epilogue::OutputTileIterator iterator_D( + params.params_D, + ptr_D, + params.problem_size.mn(), + thread_idx, + threadblock_offset + ); + + // Additional tensor to load from + typename Epilogue::TensorTileIterator tensor_iterator( + params.params_Tensor, + // Only the final block outputs Tensor + ((params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) && + (params.grid_tiled_shape.k() != threadblock_tile_offset.k() + 1)) + ? nullptr + : ptr_Tensor, + params.problem_size.mn(), + thread_idx, + threadblock_offset); + + // Construct the epilogue + Epilogue epilogue( + shared_storage.epilogue, + thread_idx, + warp_idx, + lane_idx); + + #if SPLIT_K_ENABLED + // Wait on the semaphore - this latency may have been covered by iterator construction + if ((params.mode == GemmUniversalMode::kGemm) && params.grid_tiled_shape.k() > 1) { + + // For subsequent threadblocks, the source matrix is held in the 'D' tensor. + if (threadblock_tile_offset.k()) { + iterator_C1 = iterator_D; + } + + semaphore.wait(threadblock_tile_offset.k()); + + } + #endif + + // Move to appropriate location for this output tile + if (ptr_Vector) { + ptr_Vector += threadblock_offset.column() + threadblock_tile_offset.m() * params.ldr; + } + + // Execute the epilogue operator to update the destination tensor. + epilogue(output_op, + // Only the final block uses Vector + ((params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) && + (params.grid_tiled_shape.k() != threadblock_tile_offset.k() + 1)) + ? 
nullptr + : ptr_Vector, + iterator_D, + accumulators, + iterator_C1, + iterator_C2, + tensor_iterator, + params.problem_size.mn(), + threadblock_offset); + + // + // Release the semaphore + // + + #if SPLIT_K_ENABLED + if ((params.mode == GemmUniversalMode::kGemm) && params.grid_tiled_shape.k() > 1) { + + int lock = 0; + if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) { + + // The final threadblock resets the semaphore for subsequent grids. + lock = 0; + } + else { + // Otherwise, the semaphore is incremented + lock = threadblock_tile_offset.k() + 1; + } + + semaphore.release(lock); + } + #endif + } +}; + +// GemmWithFusedEpilogue with one source +template < + typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate + typename Epilogue_, ///! Epilogue + typename ThreadblockSwizzle_ ///! Threadblock swizzling function +> +struct GemmWithFusedEpilogue { +public: + + using Mma = Mma_; + using Epilogue = Epilogue_; + using EpilogueOutputOp = typename Epilogue::OutputOp; + using ThreadblockSwizzle = ThreadblockSwizzle_; + + using ElementA = typename Mma::IteratorA::Element; + using LayoutA = typename Mma::IteratorA::Layout; + using ElementB = typename Mma::IteratorB::Element; + using LayoutB = typename Mma::IteratorB::Layout; + using ElementC = typename Epilogue::OutputTileIterator::Element; + using LayoutC = typename Epilogue::OutputTileIterator::Layout; + + static ComplexTransform const kTransformA = Mma::kTransformA; + static ComplexTransform const kTransformB = Mma::kTransformB; + using Operator = typename Mma::Operator; + + using OperatorClass = typename Mma::Operator::OperatorClass; + using ThreadblockShape = typename Mma::Shape; + using WarpShape = typename Mma::Operator::Shape; + using InstructionShape = typename Mma::Policy::Operator::InstructionShape; + using ArchTag = typename Mma::ArchTag; + + static int const kStages = Mma::kStages; + static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + + /// Warp count (concept: GemmShape) + using WarpCount = typename Mma::WarpCount; + static int const kThreadCount = 32 * WarpCount::kCount; + + /// Split-K preserves splits that are 128b aligned + static int const kSplitKAlignment = const_max( + 128 / sizeof_bits::value, + 128 / sizeof_bits::value + ); + + // + // Structures + // + + /// Argument structure + struct Arguments : UniversalArgumentsBase + { + // + // Data members + // + + typename EpilogueOutputOp::Params epilogue; + + void const * ptr_A; + void const * ptr_B; + void const * ptr_C; + void * ptr_D; + + void * ptr_Vector; + void * ptr_Tensor; + + int64_t batch_stride_A; + int64_t batch_stride_B; + int64_t batch_stride_C; + int64_t batch_stride_Vector; + int64_t batch_stride_Tensor; + + typename LayoutA::Stride::Index lda; + typename LayoutB::Stride::Index ldb; + typename LayoutC::Stride::Index ldc; + typename LayoutC::Stride::Index ldd; + typename LayoutC::Stride::Index ldr; + typename LayoutC::Stride::Index ldt; + + // + // Methods + // + + Arguments(): + ptr_A(nullptr), + ptr_B(nullptr), + ptr_C(nullptr), + ptr_D(nullptr) + {} + + /// constructs an arguments structure + Arguments( + GemmUniversalMode mode, + GemmCoord problem_size, + int batch_count, + typename EpilogueOutputOp::Params epilogue, + void const * ptr_A, + void const * ptr_B, + void const * ptr_C, + void * ptr_D, + void * ptr_Vector, + void * ptr_Tensor, + int64_t 
batch_stride_A, + int64_t batch_stride_B, + int64_t batch_stride_C, + int64_t batch_stride_D, + int64_t batch_stride_Vector, + int64_t batch_stride_Tensor, + typename LayoutA::Stride::Index lda, + typename LayoutB::Stride::Index ldb, + typename LayoutC::Stride::Index ldc, + typename LayoutC::Stride::Index ldd, + typename LayoutC::Stride::Index ldr, + typename LayoutC::Stride::Index ldt) + : + UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D), + epilogue(epilogue), + ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D), + ptr_Vector(ptr_Vector), + ptr_Tensor(ptr_Tensor), + batch_stride_A(batch_stride_A), + batch_stride_B(batch_stride_B), + batch_stride_C(batch_stride_C), + batch_stride_Vector(batch_stride_Vector), + batch_stride_Tensor(batch_stride_Tensor), + lda(lda), ldb(ldb), ldc(ldc), ldd(ldd), ldr(ldr), ldt(ldt) + { + CUTLASS_TRACE_HOST("GemmWithFusedEpilogue::Arguments::Arguments() - problem_size: " << problem_size); + CUTLASS_TRACE_HOST(" ptr_Vector: " << (void *)this->ptr_Vector); + CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor); + CUTLASS_TRACE_HOST(" ldr: " << this->ldr); + CUTLASS_TRACE_HOST(" ldt: " << this->ldt); + } + + /// Returns arguments for the transposed problem + Arguments transposed_problem() const { + Arguments args(*this); + + std::swap(args.problem_size.m(), args.problem_size.n()); + std::swap(args.ptr_A, args.ptr_B); + std::swap(args.lda, args.ldb); + std::swap(args.batch_stride_A, args.batch_stride_B); + + return args; + } + }; + + + // + // Structure for precomputing values in host memory and passing to kernels + // + + /// Parameters structure + struct Params : UniversalParamsBase< + ThreadblockSwizzle, + ThreadblockShape, + ElementA, + ElementB, + ElementC, + LayoutA, + LayoutB> + { + using ParamsBase = UniversalParamsBase< + ThreadblockSwizzle, + ThreadblockShape, + ElementA, + ElementB, + ElementC, + LayoutA, + LayoutB>; + + // + // Data members + // + + typename Mma::IteratorA::Params params_A; + typename Mma::IteratorB::Params params_B; + typename Epilogue::OutputTileIterator::Params params_C; + typename Epilogue::OutputTileIterator::Params params_D; + typename Epilogue::TensorTileIterator::Params params_Tensor; + + typename EpilogueOutputOp::Params output_op; + + void * ptr_A; + void * ptr_B; + void * ptr_C; + void * ptr_D; + + void * ptr_Vector; + typename LayoutC::Stride::Index ldr; + + void * ptr_Tensor; + + int64_t batch_stride_A; + int64_t batch_stride_B; + int64_t batch_stride_C; + int64_t batch_stride_Vector; + int64_t batch_stride_Tensor; + + // + // Host dispatch API + // + + /// Default constructor + Params() = default; + + /// Constructor + Params( + Arguments const &args, /// GEMM application arguments + int device_sms, /// Number of SMs on the device + int sm_occupancy) /// Kernel SM occupancy (in thread blocks) + : + ParamsBase(args, device_sms, sm_occupancy), + params_A(args.lda), + params_B(args.ldb), + params_C(args.ldc), + params_D(args.ldd), + params_Tensor(args.ldt), + output_op(args.epilogue), + ptr_A(const_cast(args.ptr_A)), + ptr_B(const_cast(args.ptr_B)), + ptr_C(const_cast(args.ptr_C)), + ptr_D(args.ptr_D), + ptr_Vector(args.ptr_Vector), + ldr(args.ldr), + ptr_Tensor(args.ptr_Tensor), + batch_stride_A(args.batch_stride_A), + batch_stride_B(args.batch_stride_B), + batch_stride_C(args.batch_stride_C), + batch_stride_Vector(args.batch_stride_Vector), + batch_stride_Tensor(args.batch_stride_Tensor) + { + CUTLASS_TRACE_HOST("GemmWithFusedEpilogue::Params::Params() - problem_size: " << 
problem_size); + CUTLASS_TRACE_HOST(" ptr_Vector: " << (void *)this->ptr_Vector); + CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor); + CUTLASS_TRACE_HOST(" ldr: " << this->ldr); + CUTLASS_TRACE_HOST(" ldt: " << args.ldt); + } + + /// Lightweight update given a subset of arguments. + CUTLASS_HOST_DEVICE + void update(Arguments const &args) + { + ptr_A = const_cast(args.ptr_A); + ptr_B = const_cast(args.ptr_B); + ptr_C = const_cast(args.ptr_C); + ptr_D = args.ptr_D; + + ptr_Vector = args.ptr_Vector; + ldr = args.ldr; + ptr_Tensor = args.ptr_Tensor; + + batch_stride_A = args.batch_stride_A; + batch_stride_B = args.batch_stride_B; + batch_stride_C = args.batch_stride_C; + batch_stride_Vector = args.batch_stride_Vector; + batch_stride_Tensor = args.batch_stride_Tensor; + this->batch_stride_D = args.batch_stride_D; + + output_op = args.epilogue; + + CUTLASS_TRACE_HOST("GemmWithFusedEpilogue::Params::update()"); + CUTLASS_TRACE_HOST(" ptr_Vector: " << (void *)this->ptr_Vector); + CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor); + CUTLASS_TRACE_HOST(" ldr: " << this->ldr); + } + }; + + + /// Shared memory storage structure + union SharedStorage { + typename Mma::SharedStorage main_loop; + typename Epilogue::SharedStorage epilogue; + }; + +public: + + // + // Host dispatch API + // + + /// Determines whether kernel satisfies alignment + static Status can_implement( + cutlass::gemm::GemmCoord const & problem_size) { + + CUTLASS_TRACE_HOST("GemmWithFusedEpilogue::can_implement()"); + + static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + + bool isAMisaligned = false; + bool isBMisaligned = false; + bool isCMisaligned = false; + + if (platform::is_same::value) { + isAMisaligned = problem_size.k() % kAlignmentA; + } else if (platform::is_same::value) { + isAMisaligned = problem_size.m() % kAlignmentA; + } else if (platform::is_same>::value + || platform::is_same>::value) { + isAMisaligned = problem_size.k() % kAlignmentA; + } + + if (platform::is_same::value) { + isBMisaligned = problem_size.n() % kAlignmentB; + } else if (platform::is_same::value) { + isBMisaligned = problem_size.k() % kAlignmentB; + } else if (platform::is_same>::value + || platform::is_same>::value) { + isBMisaligned = problem_size.k() % kAlignmentB; + } + + if (platform::is_same::value) { + isCMisaligned = problem_size.n() % kAlignmentC; + } else if (platform::is_same::value) { + isCMisaligned = problem_size.m() % kAlignmentC; + } else if (platform::is_same>::value + || platform::is_same>::value) { + isCMisaligned = problem_size.n() % kAlignmentC; + } + + if (isAMisaligned) { + CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for A operand"); + return Status::kErrorMisalignedOperand; + } + + if (isBMisaligned) { + CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for B operand"); + return Status::kErrorMisalignedOperand; + } + + if (isCMisaligned) { + CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for C operand"); + return Status::kErrorMisalignedOperand; + } + + CUTLASS_TRACE_HOST(" returning kSuccess"); + + return Status::kSuccess; + } + + static Status can_implement(Arguments const &args) { + return can_implement(args.problem_size); + } + +public: + + // + // Device-only API + // + + // Factory invocation + CUTLASS_DEVICE + static void invoke( + Params const ¶ms, + SharedStorage &shared_storage) + { + 
GemmWithFusedEpilogue op; + op(params, shared_storage); + } + + #define SPLIT_K_ENABLED 1 + + /// Executes one GEMM + CUTLASS_DEVICE + void operator()(Params const &params, SharedStorage &shared_storage) { + + // Compute threadblock location + ThreadblockSwizzle threadblock_swizzle; + + cutlass::gemm::GemmCoord threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + // Early exit if CTA is out of range + if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() || + params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { + + return; + } + + int offset_k = 0; + int problem_size_k = params.problem_size.k(); + + ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A); + ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B); + + + #if SPLIT_K_ENABLED + // + // Fetch pointers based on mode. + // + if (params.mode == GemmUniversalMode::kGemm || + params.mode == GemmUniversalMode::kGemmSplitKParallel) { + + if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) { + + problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size; + } + + offset_k = threadblock_tile_offset.k() * params.gemm_k_size; + } + else if (params.mode == GemmUniversalMode::kBatched) { + ptr_A += threadblock_tile_offset.k() * params.batch_stride_A; + ptr_B += threadblock_tile_offset.k() * params.batch_stride_B; + } + else if (params.mode == GemmUniversalMode::kArray) { + ptr_A = static_cast<ElementA * const *>(params.ptr_A)[threadblock_tile_offset.k()]; + ptr_B = static_cast<ElementB * const *>(params.ptr_B)[threadblock_tile_offset.k()]; + } + #endif + + // Compute initial location in logical coordinates + cutlass::MatrixCoord tb_offset_A{ + threadblock_tile_offset.m() * Mma::Shape::kM, + offset_k, + }; + + cutlass::MatrixCoord tb_offset_B{ + offset_k, + threadblock_tile_offset.n() * Mma::Shape::kN + }; + + // Compute position within threadblock + int thread_idx = threadIdx.x; + + // Construct iterators to A and B operands + typename Mma::IteratorA iterator_A( + params.params_A, + ptr_A, + {params.problem_size.m(), problem_size_k}, + thread_idx, + tb_offset_A); + + typename Mma::IteratorB iterator_B( + params.params_B, + ptr_B, + {problem_size_k, params.problem_size.n()}, + thread_idx, + tb_offset_B); + + // Broadcast the warp_id computed by lane 0 to ensure dependent code + // is compiled as warp-uniform.
+ int warp_idx = canonical_warp_idx_sync(); + + int lane_idx = threadIdx.x % 32; + + // + // Main loop + // + + // Construct thread-scoped matrix multiply + Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); + + typename Mma::FragmentC accumulators; + + accumulators.clear(); + + // Compute threadblock-scoped matrix multiply-add + int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK; + + // Compute threadblock-scoped matrix multiply-add + mma( + gemm_k_iterations, + accumulators, + iterator_A, + iterator_B, + accumulators); + + // + // Epilogue + // + + EpilogueOutputOp output_op(params.output_op); + + // + // Masked tile iterators constructed from members + // + + threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + //assume identity swizzle + MatrixCoord threadblock_offset( + threadblock_tile_offset.m() * Mma::Shape::kM, + threadblock_tile_offset.n() * Mma::Shape::kN + ); + + int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m(); + + ElementC *ptr_C = static_cast(params.ptr_C); + ElementC *ptr_D = static_cast(params.ptr_D); + typename Epilogue::ElementTensor *ptr_Tensor = static_cast(params.ptr_Tensor); + + // Define the reduction output pointer and move to the appropriate place + typename Epilogue::ElementVector *ptr_Vector = + static_cast(params.ptr_Vector); + + // + // Fetch pointers based on mode. + // + + // + // Special path when split-K not enabled. + // + + if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() == 1) { + + // Tile iterators loading from source tensors. + typename Epilogue::OutputTileIterator iterator_C( + params.params_C, + ptr_C, + params.problem_size.mn(), + thread_idx, + threadblock_offset + ); + + // Tile iterator writing to destination tensor. + typename Epilogue::OutputTileIterator iterator_D( + params.params_D, + ptr_D, + params.problem_size.mn(), + thread_idx, + threadblock_offset + ); + + // Additional tensor to load from + typename Epilogue::TensorTileIterator tensor_iterator( + params.params_Tensor, + // Only the final block outputs Tensor + ptr_Tensor, + params.problem_size.mn(), + thread_idx, + threadblock_offset); + + // Construct the epilogue + Epilogue epilogue( + shared_storage.epilogue, + thread_idx, + warp_idx, + lane_idx); + + // Move to appropriate location for this output tile + if (ptr_Vector) { + ptr_Vector += threadblock_offset.column() + threadblock_tile_offset.m() * params.ldr; + } + + // Execute the epilogue operator to update the destination tensor. + epilogue(output_op, + ptr_Vector, + iterator_D, + accumulators, + iterator_C, + tensor_iterator, + params.problem_size.mn(), + threadblock_offset); + + return; + } + + // + // Slower path when split-K or batching is needed + // + + + #if SPLIT_K_ENABLED + // Construct the semaphore. + Semaphore semaphore(params.semaphore + block_idx, thread_idx); + + if (params.mode == GemmUniversalMode::kGemm) { + + // If performing a reduction via split-K, fetch the initial synchronization + if (params.grid_tiled_shape.k() > 1) { + + // Fetch the synchronization lock initially but do not block. 
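+ // Example ordering for one output tile when grid_tiled_shape.k() == 3: partition 0 waits on 0, writes its partial result to D and releases 1; partition 1 waits on 1, reads D as its C source, writes D and releases 2; partition 2 waits on 2, reads D, writes the final result and releases 0.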
+ semaphore.fetch(); + + // Indicate which position in a serial reduction the output operator is currently updating + output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k()); + } + } + else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) { + ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; + } + else if (params.mode == GemmUniversalMode::kBatched) { + ptr_C += threadblock_tile_offset.k() * params.batch_stride_C; + ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; + if (ptr_Tensor) { + ptr_Tensor += threadblock_tile_offset.k() * params.batch_stride_Tensor; + } + if (ptr_Vector) { + ptr_Vector += threadblock_tile_offset.k() * params.batch_stride_Vector; + } + } + else if (params.mode == GemmUniversalMode::kArray) { + ptr_C = static_cast(params.ptr_C)[threadblock_tile_offset.k()]; + ptr_D = static_cast(params.ptr_D)[threadblock_tile_offset.k()]; + if (ptr_Tensor) { + ptr_Tensor = static_cast(params.ptr_Tensor)[threadblock_tile_offset.k()]; + } + if (ptr_Vector) { + ptr_Vector = static_cast(params.ptr_Vector)[threadblock_tile_offset.k()]; + } + } + #endif + + // Tile iterators loading from source tensors. + typename Epilogue::OutputTileIterator iterator_C( + params.params_C, + ptr_C, + params.problem_size.mn(), + thread_idx, + threadblock_offset + ); + + // Tile iterator writing to destination tensor. + typename Epilogue::OutputTileIterator iterator_D( + params.params_D, + ptr_D, + params.problem_size.mn(), + thread_idx, + threadblock_offset + ); + + // Additional tensor to load from + typename Epilogue::TensorTileIterator tensor_iterator( + params.params_Tensor, + // Only the final block outputs Tensor + ((params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) && + (params.grid_tiled_shape.k() != threadblock_tile_offset.k() + 1)) + ? nullptr + : ptr_Tensor, + params.problem_size.mn(), + thread_idx, + threadblock_offset); + + // Construct the epilogue + Epilogue epilogue( + shared_storage.epilogue, + thread_idx, + warp_idx, + lane_idx); + + #if SPLIT_K_ENABLED + // Wait on the semaphore - this latency may have been covered by iterator construction + if ((params.mode == GemmUniversalMode::kGemm) && params.grid_tiled_shape.k() > 1) { + + // For subsequent threadblocks, the source matrix is held in the 'D' tensor. + if (threadblock_tile_offset.k()) { + iterator_C = iterator_D; + } + + semaphore.wait(threadblock_tile_offset.k()); + + } + #endif + + // Move to appropriate location for this output tile + if (ptr_Vector) { + ptr_Vector += threadblock_offset.column() + threadblock_tile_offset.m() * params.ldr; + } + + // Execute the epilogue operator to update the destination tensor. + epilogue(output_op, + // Only the final block uses Vector + ((params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) && + (params.grid_tiled_shape.k() != threadblock_tile_offset.k() + 1)) + ? nullptr + : ptr_Vector, + iterator_D, + accumulators, + iterator_C, + tensor_iterator, + params.problem_size.mn(), + threadblock_offset); + + // + // Release the semaphore + // + + #if SPLIT_K_ENABLED + if ((params.mode == GemmUniversalMode::kGemm) && params.grid_tiled_shape.k() > 1) { + + int lock = 0; + if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) { + + // The final threadblock resets the semaphore for subsequent grids. 
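+ // Storing 0 (rather than k + 1) returns the per-tile lock to its initial state so a later launch reusing the same workspace does not need to re-zero it.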
+ lock = 0; + } + else { + // Otherwise, the semaphore is incremented + lock = threadblock_tile_offset.k() + 1; + } + + semaphore.release(lock); + } + #endif + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_with_k_reduction.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_with_k_reduction.h new file mode 100644 index 0000000000000000000000000000000000000000..863b0c4c29b82a1f816077282c1a8278a5311ce5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_with_k_reduction.h @@ -0,0 +1,704 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/fast_math.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/complex.h" +#include "cutlass/semaphore.h" +#include "cutlass/layout/pitch_linear.h" +#include "cutlass/gemm/kernel/params_universal_base.h" + +#include "cutlass/trace.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate + typename Epilogue_, ///! 
Epilogue + typename EpilogueGemmKReduction_, ///! Epilogue + typename ThreadblockSwizzle_ ///! Threadblock swizzling function +> +struct GemmWithKReduction { +public: + + using Mma = Mma_; + using Epilogue = Epilogue_; + using EpilogueOutputOp = typename Epilogue::OutputOp; + using EpilogueGemmKReduction = EpilogueGemmKReduction_; + using ThreadblockSwizzle = ThreadblockSwizzle_; + + using ElementA = typename Mma::IteratorA::Element; + using LayoutA = typename Mma::IteratorA::Layout; + using ElementB = typename Mma::IteratorB::Element; + using LayoutB = typename Mma::IteratorB::Layout; + using ElementC = typename Epilogue::OutputTileIterator::Element; + using LayoutC = typename Epilogue::OutputTileIterator::Layout; + using LayoutGemmKReduction = cutlass::layout::PitchLinear; + + static ComplexTransform const kTransformA = Mma::kTransformA; + static ComplexTransform const kTransformB = Mma::kTransformB; + using Operator = typename Mma::Operator; + + using OperatorClass = typename Mma::Operator::OperatorClass; + using ThreadblockShape = typename Mma::Shape; + using WarpShape = typename Mma::Operator::Shape; + using InstructionShape = typename Mma::Policy::Operator::InstructionShape; + using ArchTag = typename Mma::ArchTag; + + static int const kStages = Mma::kStages; + static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + + /// Warp count (concept: GemmShape) + using WarpCount = typename Mma::WarpCount; + static int const kThreadCount = 32 * WarpCount::kCount; + + /// Split-K preserves splits that are 128b aligned + static int const kSplitKAlignment = const_max(128 / sizeof_bits::value, 128 / sizeof_bits::value); + + static int const kReduceKForA = Mma::kReduceKForA; + + // + // Structures + // + + /// Argument structure + struct Arguments : UniversalArgumentsBase + { + // + // Data members + // + + typename EpilogueOutputOp::Params epilogue; + + void const * ptr_A; + void const * ptr_B; + void const * ptr_C; + void * ptr_D; + void * ptr_gemm_k_reduction; + + int64_t batch_stride_A; + int64_t batch_stride_B; + int64_t batch_stride_C; + int64_t batch_stride_gemm_k_reduction; + + typename LayoutA::Stride::Index lda; + typename LayoutB::Stride::Index ldb; + typename LayoutC::Stride::Index ldc; + typename LayoutC::Stride::Index ldd; + typename LayoutGemmKReduction::Stride::Index ld_gemm_k_reduction; + + // + // Methods + // + + Arguments() : + ptr_A(nullptr), + ptr_B(nullptr), + ptr_C(nullptr), + ptr_D(nullptr), + ptr_gemm_k_reduction(nullptr) + {} + + /// constructs an arguments structure + Arguments( + GemmUniversalMode mode, + GemmCoord problem_size, + int batch_count, + typename EpilogueOutputOp::Params epilogue, + void const * ptr_A, + void const * ptr_B, + void const * ptr_C, + void * ptr_D, + void * ptr_gemm_k_reduction, + int64_t batch_stride_A, + int64_t batch_stride_B, + int64_t batch_stride_C, + int64_t batch_stride_D, + int64_t batch_stride_gemm_k_reduction, + typename LayoutA::Stride::Index lda, + typename LayoutB::Stride::Index ldb, + typename LayoutC::Stride::Index ldc, + typename LayoutC::Stride::Index ldd, + typename LayoutGemmKReduction::Stride::Index ld_gemm_k_reduction) + : + UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D), + epilogue(epilogue), + ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D), ptr_gemm_k_reduction(ptr_gemm_k_reduction), + batch_stride_A(batch_stride_A), 
batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C), batch_stride_gemm_k_reduction(batch_stride_gemm_k_reduction), + lda(lda), ldb(ldb), ldc(ldc), ldd(ldd), ld_gemm_k_reduction(ld_gemm_k_reduction) + { + CUTLASS_TRACE_HOST("GemmUniversal::Arguments::Arguments() - problem_size: " << problem_size); + } + + /// Returns arguments for the transposed problem + Arguments transposed_problem() const { + Arguments args(*this); + + std::swap(args.problem_size.m(), args.problem_size.n()); + std::swap(args.ptr_A, args.ptr_B); + std::swap(args.lda, args.ldb); + std::swap(args.batch_stride_A, args.batch_stride_B); + + return args; + } + }; + + + // + // Structure for precomputing values in host memory and passing to kernels + // + + /// Parameters structure + struct Params : UniversalParamsBase< + ThreadblockSwizzle, + ThreadblockShape, + ElementA, + ElementB, + ElementC, + LayoutA, + LayoutB> + { + using ParamsBase = UniversalParamsBase< + ThreadblockSwizzle, + ThreadblockShape, + ElementA, + ElementB, + ElementC, + LayoutA, + LayoutB>; + + // + // Data members + // + + typename Mma::IteratorA::Params params_A; + typename Mma::IteratorB::Params params_B; + typename Epilogue::OutputTileIterator::Params params_C; + typename Epilogue::OutputTileIterator::Params params_D; + + typename EpilogueOutputOp::Params output_op; + + void * ptr_A; + void * ptr_B; + void * ptr_C; + void * ptr_D; + void * ptr_gemm_k_reduction; + + int64_t batch_stride_A; + int64_t batch_stride_B; + int64_t batch_stride_C; + int64_t batch_stride_gemm_k_reduction; + + // + // Host dispatch API + // + + /// Default constructor + Params() = default; + + /// Constructor + Params( + Arguments const &args, /// GEMM application arguments + int device_sms, /// Number of SMs on the device + int sm_occupancy) /// Kernel SM occupancy (in thread blocks) + : + ParamsBase(args, device_sms, sm_occupancy), + params_A(args.lda), + params_B(args.ldb), + params_C(args.ldc), + params_D(args.ldd), + output_op(args.epilogue), + ptr_A(const_cast(args.ptr_A)), + ptr_B(const_cast(args.ptr_B)), + ptr_C(const_cast(args.ptr_C)), + batch_stride_A(args.batch_stride_A), + batch_stride_B(args.batch_stride_B), + batch_stride_C(args.batch_stride_C), + batch_stride_gemm_k_reduction(args.batch_stride_gemm_k_reduction), + ptr_D(args.ptr_D), + ptr_gemm_k_reduction(args.ptr_gemm_k_reduction) + {} + + /// Assign and initialize the specified workspace buffer. Assumes + /// the memory allocated to workspace is at least as large as get_workspace_size(). + Status init_workspace( + void *workspace, + cudaStream_t stream = nullptr) + { + CUTLASS_TRACE_HOST("GemmUniversal::Params::Params() - problem_size: " << this->problem_size); + + if (this->mode == GemmUniversalMode::kGemmSplitKParallel) { + ptr_D = workspace; + ptr_gemm_k_reduction = static_cast(workspace) + + sizeof(ElementC) * size_t(this->batch_stride_D) * size_t(this->grid_tiled_shape.k()); + + return Status::kSuccess; + } + + return ParamsBase::init_workspace(workspace, stream); + } + + /// Returns the workspace size (in bytes) needed for this problem geometry + size_t get_workspace_size() const + { + size_t workspace_bytes = ParamsBase::get_workspace_size(); + + if (this->mode == GemmUniversalMode::kGemmSplitKParallel) + { + // Split-K parallel always requires a temporary workspace + workspace_bytes += + sizeof(ElementC) * + size_t(batch_stride_gemm_k_reduction) * + size_t(this->grid_tiled_shape.k()); + } + + return workspace_bytes; + } + + /// Lightweight update given a subset of arguments. 
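+ /// Only pointers, batch strides, and the epilogue parameters are refreshed; grid shape and the other values precomputed by ParamsBase are kept, so the same Params object can be reused when only the tensor pointers change.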
+ void update(Arguments const &args) + { + ptr_A = const_cast(args.ptr_A); + ptr_B = const_cast(args.ptr_B); + ptr_C = const_cast(args.ptr_C); + ptr_D = args.ptr_D; + ptr_gemm_k_reduction = args.ptr_gemm_k_reduction; + + batch_stride_A = args.batch_stride_A; + batch_stride_B = args.batch_stride_B; + batch_stride_C = args.batch_stride_C; + batch_stride_gemm_k_reduction = args.batch_stride_gemm_k_reduction; + this->batch_stride_D = args.batch_stride_D; + + output_op = args.epilogue; + + CUTLASS_TRACE_HOST("GemmUniversal::Params::update()"); + } + }; + + /// Shared memory storage structure + union SharedStorage { + typename Mma::SharedStorage main_loop; + typename Epilogue::SharedStorage epilogue; + }; + + +public: + + // + // Host dispatch API + // + + /// Determines whether kernel satisfies alignment + static Status can_implement( + cutlass::gemm::GemmCoord const & problem_size) { + + CUTLASS_TRACE_HOST("GemmUniversal::can_implement()"); + + static int const kAlignmentA = (platform::is_same>::value) + ? 32 + : (platform::is_same>::value) + ? 64 + : Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = (platform::is_same>::value) + ? 32 + : (platform::is_same>::value) + ? 64 + : Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = (platform::is_same>::value) + ? 32 + : (platform::is_same>::value) + ? 64 + : Epilogue::OutputTileIterator::kElementsPerAccess; + + bool isAMisaligned = false; + bool isBMisaligned = false; + bool isCMisaligned = false; + + if (platform::is_same::value) { + isAMisaligned = problem_size.k() % kAlignmentA; + } else if (platform::is_same::value) { + isAMisaligned = problem_size.m() % kAlignmentA; + } else if (platform::is_same>::value + || platform::is_same>::value) { + isAMisaligned = problem_size.k() % kAlignmentA; + } + + if (platform::is_same::value) { + isBMisaligned = problem_size.n() % kAlignmentB; + } else if (platform::is_same::value) { + isBMisaligned = problem_size.k() % kAlignmentB; + } else if (platform::is_same>::value + || platform::is_same>::value) { + isBMisaligned = problem_size.k() % kAlignmentB; + } + + if (platform::is_same::value) { + isCMisaligned = problem_size.n() % kAlignmentC; + } else if (platform::is_same::value) { + isCMisaligned = problem_size.m() % kAlignmentC; + } else if (platform::is_same>::value + || platform::is_same>::value) { + isCMisaligned = problem_size.n() % kAlignmentC; + } + + if (isAMisaligned) { + CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for operand A"); + return Status::kErrorMisalignedOperand; + } + + if (isBMisaligned) { + CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for operand B"); + return Status::kErrorMisalignedOperand; + } + + if (isCMisaligned) { + CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for operand C"); + return Status::kErrorMisalignedOperand; + } + + CUTLASS_TRACE_HOST(" returning kSuccess"); + + return Status::kSuccess; + } + + + static Status can_implement(Arguments const &args) { + return can_implement(args.problem_size); + } + + +public: + + // + // Device-only API + // + + // Factory invocation + CUTLASS_DEVICE + static void invoke( + Params const ¶ms, + SharedStorage &shared_storage) + { + GemmWithKReduction op; + op(params, shared_storage); + } + + + /// Executes one GEMM + CUTLASS_DEVICE + void operator()(Params const ¶ms, SharedStorage &shared_storage) { + + // Compute threadblock location + ThreadblockSwizzle threadblock_swizzle; + + cutlass::gemm::GemmCoord threadblock_tile_offset = + 
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + // Early exit if CTA is out of range + if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() || + params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { + + return; + } + + int offset_k = 0; + int problem_size_k = params.problem_size.k(); + + ElementA *ptr_A = static_cast(params.ptr_A); + ElementB *ptr_B = static_cast(params.ptr_B); + + // + // Fetch pointers based on mode. + // + if (params.mode == GemmUniversalMode::kGemm || + params.mode == GemmUniversalMode::kGemmSplitKParallel) { + + if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) { + + problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size; + } + + offset_k = threadblock_tile_offset.k() * params.gemm_k_size; + } + else if (params.mode == GemmUniversalMode::kBatched) { + ptr_A += threadblock_tile_offset.k() * params.batch_stride_A; + ptr_B += threadblock_tile_offset.k() * params.batch_stride_B; + } + else if (params.mode == GemmUniversalMode::kArray) { + ptr_A = static_cast(params.ptr_A)[threadblock_tile_offset.k()]; + ptr_B = static_cast(params.ptr_B)[threadblock_tile_offset.k()]; + } + + __syncthreads(); + + // Compute initial location in logical coordinates + cutlass::MatrixCoord tb_offset_A{ + threadblock_tile_offset.m() * Mma::Shape::kM, + offset_k, + }; + + cutlass::MatrixCoord tb_offset_B{ + offset_k, + threadblock_tile_offset.n() * Mma::Shape::kN + }; + + + // Compute position within threadblock + int thread_idx = threadIdx.x; + + // Construct iterators to A and B operands + typename Mma::IteratorA iterator_A( + params.params_A, + ptr_A, + {params.problem_size.m(), problem_size_k}, + thread_idx, + tb_offset_A); + + typename Mma::IteratorB iterator_B( + params.params_B, + ptr_B, + {problem_size_k, params.problem_size.n()}, + thread_idx, + tb_offset_B); + + // Broadcast the warp_id computed by lane 0 to ensure dependent code + // is compiled as warp-uniform. + int warp_idx = canonical_warp_idx_sync(); + + int lane_idx = threadIdx.x % 32; + + // + // Main loop + // + + // Construct thread-scoped matrix multiply + Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); + + typename Mma::FragmentC accumulators; + + accumulators.clear(); + + typename Mma::FragmentReduction gemm_k_accumulators; + + gemm_k_accumulators.clear(); + + // Compute threadblock-scoped matrix multiply-add + int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK; + + // Compute threadblock-scoped matrix multiply-add + mma( + gemm_k_iterations, + accumulators, + iterator_A, + iterator_B, + accumulators, + gemm_k_accumulators); + + // + // Epilogue + // + + EpilogueOutputOp output_op(params.output_op); + + // + // Masked tile iterators constructed from members + // + + threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + //assume identity swizzle + MatrixCoord threadblock_offset( + threadblock_tile_offset.m() * Mma::Shape::kM, + threadblock_tile_offset.n() * Mma::Shape::kN + ); + + int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m(); + + ElementC *ptr_C = static_cast(params.ptr_C); + ElementC *ptr_D = static_cast(params.ptr_D); + ElementC *ptr_gemm_k_reduction = static_cast(params.ptr_gemm_k_reduction); + + // + // Fetch pointers based on mode. + // + + // Construct the semaphore. 
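+ // One lock per output tile: block_idx linearizes the (m, n) tile coordinate, so every k-partition covering the same tile synchronizes on the same workspace entry.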
+ Semaphore semaphore(params.semaphore + block_idx, thread_idx); + + if (params.mode == GemmUniversalMode::kGemm) { + + // If performing a reduction via split-K, fetch the initial synchronization + if (params.grid_tiled_shape.k() > 1) { + + // Fetch the synchronization lock initially but do not block. + semaphore.fetch(); + + // Indicate which position in a serial reduction the output operator is currently updating + output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k()); + } + } + else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) { + ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; + ptr_gemm_k_reduction += threadblock_tile_offset.k() * params.batch_stride_gemm_k_reduction; + } + else if (params.mode == GemmUniversalMode::kBatched) { + ptr_C += threadblock_tile_offset.k() * params.batch_stride_C; + ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; + } + else if (params.mode == GemmUniversalMode::kArray) { + ptr_C = static_cast(params.ptr_C)[threadblock_tile_offset.k()]; + ptr_D = static_cast(params.ptr_D)[threadblock_tile_offset.k()]; + } + + // Tile iterator loading from source tensor. + typename Epilogue::OutputTileIterator iterator_C( + params.params_C, + ptr_C, + params.problem_size.mn(), + thread_idx, + threadblock_offset + ); + + // Tile iterator writing to destination tensor. + typename Epilogue::OutputTileIterator iterator_D( + params.params_D, + ptr_D, + params.problem_size.mn(), + thread_idx, + threadblock_offset + ); + + Epilogue epilogue( + shared_storage.epilogue, + thread_idx, + warp_idx, + lane_idx); + + // Wait on the semaphore - this latency may have been covered by iterator construction + if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { + + // For subsequent threadblocks, the source matrix is held in the 'D' tensor. + if (threadblock_tile_offset.k()) { + iterator_C = iterator_D; + } + + semaphore.wait(threadblock_tile_offset.k()); + + } + + // Execute the epilogue operator to update the destination tensor. + epilogue( + output_op, + iterator_D, + accumulators, + iterator_C); + + if ((kReduceKForA && threadblock_tile_offset.n() == 0) + || (!kReduceKForA && threadblock_tile_offset.m() == 0)) { + + int warp_idx_mn = warp_idx % (Mma::Base::WarpCount::kM * Mma::Base::WarpCount::kN); + int warp_idx_m = warp_idx_mn % Mma::Base::WarpCount::kM; + int warp_idx_n = warp_idx_mn / Mma::Base::WarpCount::kM; + + if ((kReduceKForA && warp_idx_n == 0) + || (!kReduceKForA && warp_idx_m == 0)) { + + int reduction_warp_idx = kReduceKForA ? warp_idx_m : warp_idx_n; + int reduction_threadblock_offset = kReduceKForA ? threadblock_tile_offset.m() : + threadblock_tile_offset.n(); + int reduction_vector_size = kReduceKForA ? params.problem_size.m() + : params.problem_size.n(); + EpilogueGemmKReduction epilogue_gemm_k_reduction(thread_idx, + reduction_warp_idx, + lane_idx, + reduction_threadblock_offset, + ptr_gemm_k_reduction); + epilogue_gemm_k_reduction( + reduction_vector_size, + gemm_k_accumulators, + params.mode == GemmUniversalMode::kGemm + && (params.grid_tiled_shape.k() > 1) + && (threadblock_tile_offset.k() > 0)); + } + } + + // + // Release the semaphore + // + + if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { + + int lock = 0; + if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) { + + // The final threadblock resets the semaphore for subsequent grids. 
+ lock = 0; + } + else { + // Otherwise, the semaphore is incremented + lock = threadblock_tile_offset.k() + 1; + } + + semaphore.release(lock); + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemv.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemv.h new file mode 100644 index 0000000000000000000000000000000000000000..165b4474f42cb0e174da41dfe1540d1d48fbf59d --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemv.h @@ -0,0 +1,638 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! 
\file + \brief +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/fast_math.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/complex.h" +#include "cutlass/tensor_ref.h" + +#include "cutlass/arch/memory.h" +#include "cutlass/arch/cache_operation.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/layout/matrix.h" + +#include "cutlass/numeric_conversion.h" +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename ElementA_, + typename LayoutA_, + typename ElementB_, + typename ElementC_, + typename ElementAccumulator_, + typename EpilogueOutputOp_, + int kElementsPerAccess_ = 1, ///< Number of elements involved in a global access. + int kThreadCount_ = 0, ///< Number of threads in the thread block. + /// It will be calculated automatically if set to 0. + int kThreadsPerRow_ = 0 ///< Number of threads in the k dimension. + /// It will be calculated automatically if set to 0. +> +struct Gemv; + +///////////////////////////////////////////////////////////////////////////////////////////////// +// +// Specializations +// +///////////////////////////////////////////////////////////////////////////////////////////////// + +// GEMV for column-major A matrix +template < + typename ElementA_, + typename ElementB_, + typename ElementC_, + typename ElementAccumulator_, + typename EpilogueOutputOp_, + int kElementsPerAccess_, + int kThreadCount_, + int kThreadsPerRow_ +> +struct Gemv < + ElementA_, + layout::ColumnMajor, + ElementB_, + ElementC_, + ElementAccumulator_, + EpilogueOutputOp_, + kElementsPerAccess_, + kThreadCount_, + kThreadsPerRow_ +>{ +public: + + using ElementA = ElementA_; + using LayoutA = layout::ColumnMajor; + using TensorRefA = TensorRef; + + using ElementB = ElementB_; + using ElementC = ElementC_; + + using ElementAccumulator = ElementAccumulator_; + using EpilogueOutputOp = EpilogueOutputOp_; + + static ComplexTransform const kTransformA = ComplexTransform::kNone; + static ComplexTransform const kTransformB = ComplexTransform::kNone; + + // thread block shape (kThreadCount, 1, 1) + static int const kThreadCount = (kThreadCount_ <= 0) ? 32 : kThreadCount_; + static int const kThreadsPerRow = (kThreadsPerRow_ <= 0) ? 
1 : kThreadsPerRow_; + + static int const kStages = 1; + + static int const kAlignmentA = 1; + static int const kAlignmentB = 1; + static int const kAlignmentC = 1; + + // + // Structures + // + + /// Argument structure + struct Arguments { + MatrixCoord problem_size; + int32_t batch_count; + typename EpilogueOutputOp::Params output_op; + + TensorRefA ref_A; + + ElementB const *ptr_B; + ElementC const *ptr_C; + ElementC *ptr_D; + + int64_t inc_B; + int64_t inc_C; + int64_t inc_D; + + int64_t batch_stride_A; + int64_t batch_stride_B; + int64_t batch_stride_C; + int64_t batch_stride_D; + + // + // Methods + // + + Arguments(): batch_count(0) { } + + Arguments( + MatrixCoord problem_size, + int batch_count, + typename EpilogueOutputOp::Params output_op, + TensorRefA ref_A, + void const *ptr_B, + void const *ptr_C, + void *ptr_D, + int64_t inc_B, + int64_t inc_C, + int64_t inc_D, + int64_t batch_stride_A, + int64_t batch_stride_B, + int64_t batch_stride_C, + int64_t batch_stride_D + ): + problem_size(problem_size), + batch_count(batch_count), + output_op(output_op), + ref_A(ref_A), + ptr_B(static_cast(ptr_B)), + ptr_C(static_cast(ptr_C)), + ptr_D(static_cast(ptr_D)), + inc_B(inc_B), + inc_C(inc_C), + inc_D(inc_D), + batch_stride_A(batch_stride_A), + batch_stride_B(batch_stride_B), + batch_stride_C(batch_stride_C), + batch_stride_D(batch_stride_D) + { } + + Arguments( + MatrixCoord problem_size, + int batch_count, + typename EpilogueOutputOp::Params output_op, + TensorRefA ref_A, + void const *ptr_B, + void const *ptr_C, + void *ptr_D, + int64_t batch_stride_A, + int64_t batch_stride_B, + int64_t batch_stride_C, + int64_t batch_stride_D + ): + Arguments( + problem_size, + batch_count, + output_op, + ref_A, + ptr_B, + ptr_C, + ptr_D, + 1, + 1, + 1, + batch_stride_A, + batch_stride_B, + batch_stride_C, + batch_stride_D) + { } + + Arguments( + MatrixCoord problem_size, + typename EpilogueOutputOp::Params output_op, + TensorRefA ref_A, + void const *ptr_B, + void const *ptr_C, + void *ptr_D, + int64_t inc_B, + int64_t inc_C, + int64_t inc_D + ): + Arguments( + problem_size, + 1, + output_op, + ref_A, + ptr_B, + ptr_C, + ptr_D, + inc_B, + inc_C, + inc_D, + 1, + 1, + 1, + 1) + { } + + Status update(Arguments const &args) { + output_op = args.output_op; + ref_A = ref_A; + ptr_B = args.ptr_B; + ptr_C = args.ptr_C; + ptr_D = args.ptr_D; + + return Status::kSuccess; + } + }; + + using Params = Arguments; + + /// Shared memory storage structure + union SharedStorage { + + }; + +public: + + // + // Methods + // + + CUTLASS_DEVICE + Gemv() { } + + /// Determines whether kernel satisfies alignment + static Status can_implement(cutlass::MatrixCoord const & problem_size) { + return Status::kSuccess; + } + + static Status can_implement(Arguments const &args) { + return can_implement(args.problem_size); + } + + /// Executes one GEMV + CUTLASS_DEVICE + void operator()(Params const ¶ms, SharedStorage &shared_storage) { + + // Loop over batch indices + for (int batch_idx = blockIdx.z; batch_idx < params.batch_count; batch_idx += gridDim.z) { + + int i = blockIdx.x * kThreadCount + threadIdx.x; + + ElementA const *ptr_A = params.ref_A.data() + i; + ElementB const *ptr_B = params.ptr_B; + + ptr_A += batch_idx * params.batch_stride_A; + ptr_B += batch_idx * params.batch_stride_B; + + ElementAccumulator accum = ElementAccumulator(); + + // Compute inner product + CUTLASS_PRAGMA_NO_UNROLL + for (int k = 0; k < params.problem_size.column(); ++k) { + + // Fetch from A + ElementA a = ElementA(); + if (i < 
params.problem_size.row()) { + a = *ptr_A; + } + ptr_A += params.ref_A.stride(0); + + // Fetch from B + ElementB b = *ptr_B; + ptr_B += params.inc_B; + + // Math + accum += ElementAccumulator(a) * ElementAccumulator(b); + } + + // + // Epilogue phase + // + + ElementC const *ptr_C = params.ptr_C + i * params.inc_C + batch_idx * params.batch_stride_C; + ElementC *ptr_D = params.ptr_D + i * params.inc_D + batch_idx * params.batch_stride_D; + + EpilogueOutputOp output_op(params.output_op); + + typename EpilogueOutputOp::FragmentAccumulator accum_fragment; + typename EpilogueOutputOp::FragmentOutput source_fragment; + typename EpilogueOutputOp::FragmentOutput output_fragment; + + accum_fragment[0] = accum; + + if (i < params.problem_size.row()) { + if (output_op.is_source_needed()) { + source_fragment[0] = *ptr_C; + output_fragment = output_op(accum_fragment, source_fragment); + } + else { + output_fragment = output_op(accum_fragment); + } + + *ptr_D = output_fragment[0]; + } + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +// GEMV for row-major A matrix +template < + typename ElementA_, + typename ElementB_, + typename ElementC_, + typename ElementAccumulator_, + typename EpilogueOutputOp_, + int kElementsPerAccess_, + int kThreadCount_, + int kThreadsPerRow_ +> +struct Gemv < + ElementA_, + layout::RowMajor, + ElementB_, + ElementC_, + ElementAccumulator_, + EpilogueOutputOp_, + kElementsPerAccess_, + kThreadCount_, + kThreadsPerRow_ +>{ +public: + + using ElementA = ElementA_; + using LayoutA = layout::RowMajor; + using TensorRefA = TensorRef; + + using ElementB = ElementB_; + using ElementC = ElementC_; + + using ElementAccumulator = ElementAccumulator_; + using EpilogueOutputOp = EpilogueOutputOp_; + + static ComplexTransform const kTransformA = ComplexTransform::kNone; + static ComplexTransform const kTransformB = ComplexTransform::kNone; + + static FloatRoundStyle const Round = cutlass::FloatRoundStyle::round_to_nearest; + + // number of return elements in a global access + static int const kElementsPerAccess = kElementsPerAccess_; + + using FragmentA = Array; + using FragmentB = Array; + using FragmentCompute = Array; + + // thread block shape (kThreadsPerRow, kThreadCount / kThreadsPerRow, 1) + static int const kThreadCount = (kThreadCount_ <= 0) ? 128 : kThreadCount_; + static int const kThreadsPerRow = (kThreadsPerRow_ <= 0) ? 
+ std::min(static_cast(kThreadCount / (kElementsPerAccess * sizeof(ElementA))), 16) + : kThreadsPerRow_; + + // + // Structures + // + + /// Argument structure + struct Arguments { + MatrixCoord problem_size; + int32_t batch_count; + typename EpilogueOutputOp::Params output_op; + + TensorRefA ref_A; + + ElementB const *ptr_B; + ElementC const *ptr_C; + ElementC *ptr_D; + + int64_t batch_stride_A; + int64_t batch_stride_B; + int64_t batch_stride_C; + int64_t batch_stride_D; + + // + // Methods + // + + Arguments(): batch_count(0) { } + + Arguments( + MatrixCoord problem_size, + int32_t batch_count, + typename EpilogueOutputOp::Params output_op, + TensorRefA ref_A, + void const *ptr_B, + void const *ptr_C, + void *ptr_D, + int64_t batch_stride_A, + int64_t batch_stride_B, + int64_t batch_stride_C, + int64_t batch_stride_D + ): + problem_size(problem_size), + batch_count(batch_count), + output_op(output_op), + ref_A(ref_A), + ptr_B(static_cast(ptr_B)), + ptr_C(static_cast(ptr_C)), + ptr_D(static_cast(ptr_D)), + batch_stride_A(batch_stride_A), + batch_stride_B(batch_stride_B), + batch_stride_C(batch_stride_C), + batch_stride_D(batch_stride_D) + { } + + Arguments( + MatrixCoord problem_size, + typename EpilogueOutputOp::Params output_op, + TensorRefA ref_A, + void const *ptr_B, + void const *ptr_C, + void *ptr_D + ): + Arguments( + problem_size, + 1, + output_op, + ref_A, + ptr_B, + ptr_C, + ptr_D, + 1, + 1, + 1, + 1) + { } + + Status update(Arguments const &args) { + problem_size = args.problem_size; + batch_count = args.batch_count; + output_op = args.output_op; + ref_A = ref_A; + ptr_B = args.ptr_B; + ptr_C = args.ptr_C; + ptr_D = args.ptr_D; + batch_stride_A = args.batch_stride_A; + batch_stride_B = args.batch_stride_B; + batch_stride_C = args.batch_stride_C; + batch_stride_D = args.batch_stride_D; + + return Status::kSuccess; + } + }; + + using Params = Arguments; + + /// Shared memory storage structure + union SharedStorage { + + }; + +public: + + // + // Methods + // + + CUTLASS_DEVICE + Gemv() {} + + /// Determines whether kernel satisfies alignment + static Status can_implement(cutlass::MatrixCoord const &problem_size) { + if (problem_size.column() % kElementsPerAccess != 0) { + return Status::kErrorMisalignedOperand; + } + return Status::kSuccess; + } + + static Status can_implement(Arguments const &args) { + return can_implement(args.problem_size); + } + + /// Executes one GEMV + CUTLASS_DEVICE + void operator()(Params const ¶ms, SharedStorage &shared_storage) { + + // Loop over batch indices + for (int batch_idx = blockIdx.z; batch_idx < params.batch_count; batch_idx += gridDim.z) { + int idx_col_k = threadIdx.x; + int idx_row_m = blockIdx.x * blockDim.y + threadIdx.y; + + if (idx_row_m < params.problem_size.row()) { + // problem_size (row = m, column = k) + // matrix A (batch, m, k) + // vector B (batch, 1, k) + // vector C (batch, m, 1) + // vector D (batch, m, 1) + + // move in the batch dimension + ElementA const *ptr_A = params.ref_A.data() + batch_idx * params.batch_stride_A; + ElementB const *ptr_B = params.ptr_B + batch_idx * params.batch_stride_B; + + ElementC const *ptr_C = params.ptr_C + batch_idx * params.batch_stride_C; + ElementC *ptr_D = params.ptr_D + batch_idx * params.batch_stride_D; + + // move in the k dimension + ptr_A += idx_col_k * kElementsPerAccess; + ptr_B += idx_col_k * kElementsPerAccess; + + // move in the m dimension + ptr_A += idx_row_m * params.problem_size.column(); + ptr_C += idx_row_m; + ptr_D += idx_row_m; + + NumericArrayConverter srcA_converter; 
+ NumericArrayConverter srcB_converter; + + ElementAccumulator accum = 0.f; + + FragmentB fragB; + FragmentA fragA; + + int unroll_col_k = 0; + + // rows of the rolling tile + int const tileA_k = kThreadsPerRow * kElementsPerAccess; + + for (; unroll_col_k < params.problem_size.column() / tileA_k * tileA_k; unroll_col_k += tileA_k) { + + // fetch from matrix A + arch::global_load(fragA, (ptr_A + unroll_col_k), true); + + // fetch from vector B + arch::global_load(fragB, (ptr_B + unroll_col_k), true); + + FragmentCompute fragB_Compute = srcB_converter(fragB); + FragmentCompute fragA_Compute = srcA_converter(fragA); + + // Math + CUTLASS_PRAGMA_UNROLL + for (int e = 0; e < kElementsPerAccess; e++) { + accum += fragA_Compute.at(e) * fragB_Compute.at(e); + } + } + + // calculate the rest of K elements + // each thread fetch 1 element each time + for (int k = unroll_col_k + idx_col_k; k < params.problem_size.column(); k += kThreadsPerRow) { + ElementB b = *(ptr_B - idx_col_k * kElementsPerAccess + k); + ElementA a = *(ptr_A - idx_col_k * kElementsPerAccess + k); + + accum += ElementAccumulator(a) * ElementAccumulator(b); + } + + EpilogueOutputOp output_op(params.output_op); + typename EpilogueOutputOp::FragmentOutput source_fragment; + + // prefetch from source matrix C + if (output_op.is_source_needed()) { + source_fragment[0] = *(ptr_C); + } + + typename EpilogueOutputOp::FragmentAccumulator accum_fragment; + typename EpilogueOutputOp::FragmentOutput output_fragment; + + for (int mask = (kThreadsPerRow >> 1); mask > 0; mask >>= 1) { + accum += __shfl_xor_sync(0xFFFFFFFF, accum, mask, 32); + } + + if (idx_col_k == 0) { + accum_fragment[0] = accum; + + if (output_op.is_source_needed()) { + output_fragment = output_op(accum_fragment, source_fragment); + } + else { + output_fragment = output_op(accum_fragment); + } + + *ptr_D = output_fragment[0]; + } + } + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemv_batched_strided.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemv_batched_strided.h new file mode 100644 index 0000000000000000000000000000000000000000..11490daf0c8e7c94f9a2580f9e87b529d09df4aa --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/gemv_batched_strided.h @@ -0,0 +1,244 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/aligned_buffer.h" +#include "cutlass/array.h" + +#include "cutlass/numeric_types.h" +#include "cutlass/matrix_shape.h" + +#include "cutlass/gemm/gemm.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +namespace detail +{ + template + struct GemvBatchedStridedEpilogueScaling + { + ElementAlphaBeta const & alpha; + ElementAlphaBeta const & beta; + + CUTLASS_DEVICE + GemvBatchedStridedEpilogueScaling(ElementAlphaBeta& alpha_, ElementAlphaBeta& beta_) : + alpha(alpha_), beta(beta_) + { } + + template + CUTLASS_DEVICE + void operator()(FragmentAccumulator& accumulators, + FragmentCD const& fragment_C, + FragmentCD& fragment_D) const + { + using AccType = typename FragmentAccumulator::value_type; + using CDType = typename FragmentCD::value_type; + + static_assert(FragmentCD::kElements == FragmentAccumulator::kElements, + "Mistmatch in fragment sizes."); + + for (int i = 0; i < FragmentCD::kElements; ++i) + { + if (BetaIsZero) + { + fragment_D[i] = CDType(accumulators[i] * AccType(alpha)); + } + else + { + fragment_D[i] = CDType(accumulators[i] * AccType(alpha) + + AccType(fragment_C[i]) * AccType(beta)); + } + } + } + }; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +CUTLASS_DEVICE void GemvBatchedStridedDevice( + cutlass::gemm::BatchedGemmCoord problem_size, + ElementAlphaBeta alpha, + ElementAlphaBeta beta, + typename GemvKernel::IteratorA::TensorRef ref_A, + typename GemvKernel::IteratorA::TensorRef::LongIndex lda, + typename GemvKernel::IteratorB::TensorRef ref_B, + typename GemvKernel::IteratorB::TensorRef::LongIndex ldb, + typename GemvKernel::IteratorCD::TensorRef ref_C, + typename GemvKernel::IteratorCD::TensorRef::LongIndex ldc, + typename GemvKernel::IteratorCD::TensorRef ref_D, + typename GemvKernel::IteratorCD::TensorRef::LongIndex ldd) +{ + using ThreadBlockGemv = typename GemvKernel::ThreadBlockGemv; + using ThreadBlockSwizzle = typename GemvKernel::ThreadBlockSwizzle; + using EpilogueScale = detail::GemvBatchedStridedEpilogueScaling; + + ThreadBlockSwizzle swizzler; + + // Compute initial location in logical coordinates + BatchedGemmCoord tb_offset = swizzler.get_tile_offset(); + int const batch_idx = swizzler.get_batch_idx(); + + // Offset to the 
batch + ref_A.add_pointer_offset(batch_idx*lda); + ref_B.add_pointer_offset(batch_idx*ldb); + + // Construct iterators to A and B operands + typename GemvKernel::IteratorA::Params params_A(ref_A.layout()); + typename GemvKernel::IteratorA iterator_A( + params_A, + ref_A.data(), + { 1, problem_size.k() }, + 0, + { 0, 0 }); + + typename GemvKernel::IteratorB::Params params_B(ref_B.layout()); + typename GemvKernel::IteratorB iterator_B( + params_B, + ref_B.data(), + { problem_size.k(), problem_size.n() }, + threadIdx.x, + { 0, tb_offset.n()*ThreadBlockGemv::Shape::kN }); + + // + // Main loop + // + + // Construct thread-scoped matrix multiply + ThreadBlockGemv mma; + + typename ThreadBlockGemv::FragmentC accumulators; + accumulators.clear(); + + // Compute threadblock-scoped gemv + mma(problem_size.mnk(), accumulators, iterator_A, iterator_B, accumulators); + + // + // Epilogue + // + typename GemvKernel::FragmentCD fragment_CD; + + // Load C (skip if beta is zero) + if (!BetaIsZero) + { + tb_offset = swizzler.get_tile_offset(); + ref_C.add_pointer_offset(batch_idx*ldc); + typename GemvKernel::IteratorCD::Params params_C(ref_C.layout()); + typename GemvKernel::IteratorCD iterator_C( + params_C, + ref_C.data(), + { 1, problem_size.n() }, + threadIdx.x, + { 0, tb_offset.n()*ThreadBlockGemv::Shape::kN }); + iterator_C.load(fragment_CD); + } + + // Apply alpha/beta scaling + EpilogueScale epilogue_scale(alpha, beta); + epilogue_scale(accumulators, fragment_CD, fragment_CD); + + // Store D + tb_offset = swizzler.get_tile_offset(); + ref_D.add_pointer_offset(batch_idx*ldd); + typename GemvKernel::IteratorCD::Params params_D(ref_D.layout()); + typename GemvKernel::IteratorCD iterator_D( + params_D, + ref_D.data(), + { 1, problem_size.n() }, + threadIdx.x, + { 0, tb_offset.n()*ThreadBlockGemv::Shape::kN }); + iterator_D.store(fragment_CD); +} + +template +__global__ void GemvBatchedStrided( + cutlass::gemm::BatchedGemmCoord problem_size, + ElementAlphaBeta alpha, + ElementAlphaBeta beta, + typename GemvKernel::IteratorA::TensorRef ref_A, + typename GemvKernel::IteratorA::TensorRef::LongIndex lda, + typename GemvKernel::IteratorB::TensorRef ref_B, + typename GemvKernel::IteratorB::TensorRef::LongIndex ldb, + typename GemvKernel::IteratorCD::TensorRef ref_C, + typename GemvKernel::IteratorCD::TensorRef::LongIndex ldc, + typename GemvKernel::IteratorCD::TensorRef ref_D, + typename GemvKernel::IteratorCD::TensorRef::LongIndex ldd) +{ + GemvBatchedStridedDevice( + problem_size, alpha, beta, ref_A, lda, ref_B, ldb, ref_C, ldc, ref_D, ldd + ); +} + +template +__global__ void GemvBatchedStrided( + cutlass::gemm::BatchedGemmCoord problem_size, + ElementAlphaBeta alpha, + typename GemvKernel::IteratorA::TensorRef ref_A, + typename GemvKernel::IteratorA::TensorRef::LongIndex lda, + typename GemvKernel::IteratorB::TensorRef ref_B, + typename GemvKernel::IteratorB::TensorRef::LongIndex ldb, + typename GemvKernel::IteratorCD::TensorRef ref_D, + typename GemvKernel::IteratorCD::TensorRef::LongIndex ldd) +{ + GemvBatchedStridedDevice( + problem_size, alpha, ElementAlphaBeta(0), ref_A, lda, ref_B, ldb, ref_D, ldd, ref_D, ldd + ); +} + +template +__global__ void GemvBatchedStrided( + cutlass::gemm::BatchedGemmCoord problem_size, + typename GemvKernel::IteratorA::TensorRef ref_A, + typename GemvKernel::IteratorA::TensorRef::LongIndex lda, + typename GemvKernel::IteratorB::TensorRef ref_B, + typename GemvKernel::IteratorB::TensorRef::LongIndex ldb, + typename GemvKernel::IteratorCD::TensorRef ref_D, + typename 
GemvKernel::IteratorCD::TensorRef::LongIndex ldd) +{ + using ElementAlphaBeta = typename GemvKernel::IteratorCD::Element; + GemvBatchedStridedDevice( + problem_size, ElementAlphaBeta(1), ElementAlphaBeta(0), ref_A, lda, ref_B, ldb, ref_D, ldd, ref_D, ldd + ); +} + + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/grouped_problem_visitor.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/grouped_problem_visitor.h new file mode 100644 index 0000000000000000000000000000000000000000..d013af024314ceef39f1fd6a68dff4d5be1768b8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/grouped_problem_visitor.h @@ -0,0 +1,463 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! 
\file + \brief Base scheduler for grouped problems +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_coord.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Enumerated type describing the type of scheduling to perform for the ProblemVisitor +enum class GroupScheduleMode { + // Perform all scheduling on device + kDeviceOnly, + // Precompute on the host the full sequence of problems to access + kHostPrecompute +}; + +/// Visitor class to abstract away the algorithm for iterating over tiles +template +struct BaseGroupedProblemVisitor { + using ThreadblockShape = ThreadblockShape_; + + struct ProblemInfo { + static int32_t const kNoPrefetchEntry = -1; + int32_t problem_idx; + int32_t problem_start; + + CUTLASS_DEVICE + ProblemInfo() : problem_idx(kNoPrefetchEntry), problem_start(kNoPrefetchEntry) {} + + CUTLASS_DEVICE + ProblemInfo(int32_t problem_idx_, int32_t problem_start_) : + problem_idx(problem_idx_), problem_start(problem_start_) {} + }; + + struct Params { + cutlass::gemm::GemmCoord const *problem_sizes; + int32_t problem_count; + void const *workspace; + int32_t tile_count; + + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + Params(): problem_sizes(nullptr), problem_count(0), workspace(nullptr), tile_count(0) { } + + /// Ctor + CUTLASS_HOST_DEVICE + Params( + cutlass::gemm::GemmCoord const *problem_sizes, + int32_t problem_count, + void const *workspace = nullptr, + int32_t tile_count = 0 + ): + problem_sizes(problem_sizes), + problem_count(problem_count), + workspace(workspace), + tile_count(tile_count) + {} + + }; + + Params params; + int32_t tile_idx; + int32_t problem_tile_start; + int32_t problem_idx; + + // + // Methods + // + CUTLASS_DEVICE + BaseGroupedProblemVisitor( + Params const ¶ms_, + int32_t block_idx + ): + params(params_), + tile_idx(block_idx), + problem_tile_start(0), + problem_idx(0) + {} + + /// Get the grid shape + CUTLASS_HOST_DEVICE + static cutlass::gemm::GemmCoord grid_shape(const cutlass::gemm::GemmCoord& problem) { + return ProblemSizeHelper::grid_shape(problem); + } + + /// Gets the global tile index + CUTLASS_HOST_DEVICE + int32_t tile_index() const { + return tile_idx; + } + + /// Gets the index of the problem + CUTLASS_HOST_DEVICE + int32_t problem_index() const { + return problem_idx; + } + + CUTLASS_HOST_DEVICE + int32_t threadblock_idx() const { + return tile_idx - problem_tile_start; + } + + CUTLASS_DEVICE + void advance(int32_t grid_size) { + tile_idx += grid_size; + } + + CUTLASS_HOST_DEVICE + static void possibly_transpose_problem(cutlass::gemm::GemmCoord& problem) { + ProblemSizeHelper::possibly_transpose_problem(problem); + } + + /// Returns the problem size for the current problem + CUTLASS_HOST_DEVICE + cutlass::gemm::GemmCoord problem_size() const { + GemmCoord problem = params.problem_sizes[problem_idx]; + ProblemSizeHelper::possibly_transpose_problem(problem); + return problem; + } + + CUTLASS_HOST_DEVICE + static int32_t tile_count(const cutlass::gemm::GemmCoord& grid) { + return ProblemSizeHelper::tile_count(grid); + } + + static int32_t group_tile_count(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr, int32_t problem_count) { + int32_t total_tiles = 0; + for (int32_t i = 0; i < problem_count; ++i) { + auto problem = host_problem_sizes_ptr[i]; 
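+ // Apply any kernel-side transposition before counting this problem's tiles.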
+ possibly_transpose_problem(problem); + auto grid = grid_shape(problem); + total_tiles += tile_count(grid); + } + + return total_tiles; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename ProblemSizeHelper, + typename ThreadblockShape, + GroupScheduleMode GroupScheduleMode_, + int PrefetchTileCount, + int ThreadCount +> +struct GroupedProblemVisitor; + +///////////////////////////////////////////////////////////////////////////////////////////////// +// ProblemVisitor that performs all scheduling on device +// +template +struct GroupedProblemVisitor: public BaseGroupedProblemVisitor { + using Base = BaseGroupedProblemVisitor; + using Params = typename Base::Params; + static int const kThreadCount = ThreadCount; + static bool const kRequiresPrecomputation = false; + static int const kThreadsPerWarp = 32; + + struct SharedStorage {}; + + // Final tile of the problem loaded by this thread. Each thread will hold + // a separate value. + int32_t problem_ending_tile; + + SharedStorage &shared_storage; + + // + // Methods + // + CUTLASS_DEVICE + GroupedProblemVisitor( + Params const ¶ms_, + SharedStorage &shared_storage_, + int32_t block_idx + ): Base(params_, block_idx), + problem_ending_tile(0), + shared_storage(shared_storage_) + { + this->problem_idx = -1 * kThreadsPerWarp; + this->problem_tile_start = 0; + } + + CUTLASS_DEVICE + bool next_tile() { + // Check whether the tile to compute is within the range of the current problem. + int32_t problem_tile_end = __shfl_sync(0xffffffff, problem_ending_tile, this->problem_idx % kThreadsPerWarp); + if (this->tile_idx < problem_tile_end) { + return true; + } + + // Check whether the tile to compute is within the current group of problems fetched by the warp. + // The last tile for this group is the final tile of the problem held by the final thread in the warp. + int32_t group_tile_end = __shfl_sync(0xffffffff, problem_ending_tile, kThreadsPerWarp-1); + + // Keep the starting problem for this group in `problem_idx`. This is done to reduce + // register pressure. The starting problem for this group is simply the first problem + // in the group most recently fetched by the warp. + int32_t &group_problem_start = this->problem_idx; + group_problem_start = (this->problem_idx / kThreadsPerWarp) * kThreadsPerWarp; + + // Keep the starting tile for this group in `problem_tile_start`. This is done to reduce + // register pressure. + int32_t &group_tile_start = this->problem_tile_start; + + // Each thread in the warp processes a separate problem to advance until + // reaching a problem whose starting tile is less less than tile_idx. + while (group_tile_end <= this->tile_idx) { + group_problem_start += kThreadsPerWarp; + if (group_problem_start > this->params.problem_count) { + return false; + } + + // Since `group_tile_start` is a reference to `this->problem_tile_start`, this + // also sets `this->problem_tile_start`. The fact that `this->problem_tile_start` + // is also set here is used later in `next_tile`. + group_tile_start = group_tile_end; + + int lane_idx = threadIdx.x % kThreadsPerWarp; + int32_t lane_problem = group_problem_start + lane_idx; + + // Compute the number of tiles in the problem assigned to each thread. 
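+ // Lanes mapped past the end of the problem list contribute zero tiles.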
+ problem_ending_tile = 0; + if (lane_problem < this->params.problem_count) { + cutlass::gemm::GemmCoord problem = this->params.problem_sizes[lane_problem]; + this->possibly_transpose_problem(problem); + cutlass::gemm::GemmCoord grid = this->grid_shape(problem); + problem_ending_tile = this->tile_count(grid); + } + + // Compute a warp-wide inclusive prefix sum to compute the ending tile index of + // each thread's problem. + CUTLASS_PRAGMA_UNROLL + for (int i = 1; i < kThreadsPerWarp; i <<= 1) { + int32_t val = __shfl_up_sync(0xffffffff, problem_ending_tile, i); + if (lane_idx >= i) { + problem_ending_tile += val; + } + } + + // The total tile count for this group is now in the final position of the prefix sum + int32_t tiles_in_group = __shfl_sync(0xffffffff, problem_ending_tile, kThreadsPerWarp-1); + + problem_ending_tile += group_tile_start; + group_tile_end += tiles_in_group; + } + + // The next problem to process is the first one that does not have ending tile position + // that is greater than or equal to tile index. + int32_t problem_idx_in_group = + __popc(__ballot_sync(0xffffffff, problem_ending_tile <= this->tile_idx)); + + this->problem_idx = group_problem_start + problem_idx_in_group; + + // The starting tile for this problem is the ending tile of the previous problem. In cases + // where `problem_idx_in_group` is the first problem in the group, we do not need to reset + // `problem_tile_start`, because it is set to the previous group's ending tile in the while + // loop above. + if (problem_idx_in_group > 0) { + this->problem_tile_start = __shfl_sync(0xffffffff, problem_ending_tile, problem_idx_in_group - 1); + } + + return true; + } + + static size_t get_workspace_size(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr, + int32_t problem_count, + int32_t block_count) { + return 0; + } + + static void host_precompute(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr, + int32_t problem_count, + int32_t block_count, + void* host_workspace_ptr) {} +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// +// Precomputes schedule on host and prefetches into shared memory +// +template +struct GroupedProblemVisitor : public BaseGroupedProblemVisitor { + static_assert(PrefetchTileCount > 0, + "GroupedProblemVisitor with GroupScheduleMode `kHostPrecompute` currently requires prefetching to shared memory"); + + using Base = BaseGroupedProblemVisitor; + using Params = typename Base::Params; + using ProblemInfo = typename Base::ProblemInfo; + static bool const kRequiresPrecomputation = true; + + static int const kPrefetchTileCount = PrefetchTileCount; + static int const kThreadCount = ThreadCount; + + struct SharedStorage { + // Sequence of problem IDs and starting tiles to compute + cutlass::Array prefetched_problems; + }; + + int32_t tiles_computed; + int32_t iterations_per_block; + int32_t block_load_start; + SharedStorage &shared_storage; + ProblemInfo const *problem_info_ptr; + + // + // Methods + // + CUTLASS_DEVICE + GroupedProblemVisitor( + Params const ¶ms_, + SharedStorage &shared_storage_, + int32_t block_idx + ): Base(params_, block_idx), + tiles_computed(0), + shared_storage(shared_storage_), + problem_info_ptr(reinterpret_cast(params_.workspace)) + { + iterations_per_block = (params_.tile_count - 1 + gridDim.x) / gridDim.x; + block_load_start = iterations_per_block * block_idx; + // Start prefetching the first set of tiles to compute + prefetch_tiles(); + } + + CUTLASS_DEVICE + bool next_tile() { + if (this->tile_idx 
>= this->params.tile_count) { + return false; + } + + int32_t prefetch_idx = (tiles_computed % kPrefetchTileCount); + if (prefetch_idx == 0) { + // Ensure all previous stores to shared memory have been completed + __syncthreads(); + } + + auto problem_info = shared_storage.prefetched_problems[prefetch_idx]; + ++tiles_computed; + + if ((tiles_computed % kPrefetchTileCount) == 0) { + // Begin prefetching next set of tiles. Synchronize first to ensure that + // we don't overwrite the current buffer while someone else is using it. + __syncthreads(); + prefetch_tiles(); + } + + this->problem_idx = problem_info.problem_idx; + this->problem_tile_start = problem_info.problem_start; + + return true; + } + + static size_t get_workspace_size(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr, + int32_t problem_count, + int32_t block_count) { + int32_t total_tiles = Base::group_tile_count(host_problem_sizes_ptr, problem_count); + int32_t entries_per_block = ((total_tiles - 1 + block_count) / block_count); + return sizeof(ProblemInfo) * entries_per_block * block_count; + } +#if !defined(__CUDACC_RTC__) + static void host_precompute(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr, + int32_t problem_count, + int32_t block_count, + void* host_workspace_ptr) { + ProblemInfo* host_problem_info_ptr = reinterpret_cast(host_workspace_ptr); + int32_t total_tiles = Base::group_tile_count(host_problem_sizes_ptr, problem_count); + int32_t entries_per_block = (total_tiles - 1 + block_count) / block_count; + + int tile = 0; + int start_tile = 0; + for (int p_idx = 0; p_idx < problem_count; ++p_idx) { + auto problem = host_problem_sizes_ptr[p_idx]; + Base::possibly_transpose_problem(problem); + auto grid = Base::grid_shape(problem); + int tiles = Base::tile_count(grid); + ProblemInfo problem_info(p_idx, start_tile); + for (int i = 0; i < tiles; ++i, ++tile) { + host_problem_info_ptr[(entries_per_block * (tile % block_count)) + (tile / block_count)] = problem_info; + } + start_tile += tiles; + } + } +#endif +private: + CUTLASS_DEVICE + void prefetch_tiles() { + CUTLASS_PRAGMA_UNROLL + for (int32_t i = 0; i < kPrefetchTileCount; i += kThreadCount) { + int32_t offset = threadIdx.x + i; + if (offset < kPrefetchTileCount && (tiles_computed + offset < iterations_per_block)) { + shared_storage.prefetched_problems[offset] = problem_info_ptr[block_load_start + tiles_computed + offset]; + } + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/params_universal_base.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/params_universal_base.h new file mode 100644 index 0000000000000000000000000000000000000000..57e86af93ff0b428b4162ac759160b05b43998b4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/params_universal_base.h @@ -0,0 +1,273 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief Base functionality for common types of universal GEMM kernel parameters +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/trace.h" +#include "cutlass/gemm/gemm.h" + + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace util { + +template +CUTLASS_HOST_DEVICE +static bool +is_continous_k_aligned(GemmCoord problem_size, size_t alignmentA, size_t alignmentB) { + return (std::is_same::value && (problem_size.k() % alignmentA) == 0) || + (std::is_same::value && (problem_size.k() % alignmentB) == 0); +} + +} // namespace util + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Argument structure +struct UniversalArgumentsBase +{ + // + // Data members + // + + GemmUniversalMode mode; + GemmCoord problem_size; + int batch_count; + + int64_t batch_stride_D; + + // + // Methods + // + + UniversalArgumentsBase() : + mode(GemmUniversalMode::kGemm), + batch_count(1), + batch_stride_D(0) + {} + + /// constructs an arguments structure + UniversalArgumentsBase( + GemmUniversalMode mode, + GemmCoord problem_size, + int batch_count, + int64_t batch_stride_D) + : + mode(mode), + problem_size(problem_size), + batch_count(batch_count), + batch_stride_D(batch_stride_D) + { + CUTLASS_TRACE_HOST("GemmUniversal::Arguments::Arguments() - problem_size: " << problem_size); + } +}; + + +/// Parameters structure +template < + typename ThreadblockSwizzle, + typename ThreadblockShape, + typename ElementA, + typename ElementB, + typename ElementC, + typename LayoutA, + typename LayoutB> +struct UniversalParamsBase +{ + // + // Data members + // + + GemmCoord 
problem_size; + GemmCoord grid_tiled_shape; + int swizzle_log_tile; + + GemmUniversalMode mode; + int batch_count; + int gemm_k_size; + + int64_t batch_stride_D; + + int *semaphore; + + + // + // Host dispatch API + // + + /// Default constructor + UniversalParamsBase() = default; + + + /// Constructor + UniversalParamsBase( + UniversalArgumentsBase const &args, /// GEMM application arguments + int device_sms, /// Number of SMs on the device + int sm_occupancy) /// Kernel SM occupancy (in thread blocks) + : + problem_size(args.problem_size), + mode(args.mode), + batch_count(args.batch_count), + batch_stride_D(args.batch_stride_D), + semaphore(nullptr) + { + init_grid_tiled_shape(); + } + + /// Returns the workspace size (in bytes) needed for this problem geometry + size_t get_workspace_size() const + { + size_t workspace_bytes = 0; + if (mode == GemmUniversalMode::kGemmSplitKParallel) + { + // Split-K parallel always requires a temporary workspace + workspace_bytes = + sizeof(ElementC) * + size_t(batch_stride_D) * + size_t(grid_tiled_shape.k()); + } + else if (mode == GemmUniversalMode::kGemm && grid_tiled_shape.k() > 1) + { + // Serial split-K only requires a temporary workspace if the number of partitions along the + // GEMM K dimension is greater than one. + workspace_bytes = sizeof(int) * size_t(grid_tiled_shape.m()) * size_t(grid_tiled_shape.n()); + } + + return workspace_bytes; + } + + + /// Assign and initialize the specified workspace buffer. Assumes + /// the memory allocated to workspace is at least as large as get_workspace_size(). + Status init_workspace( + void *workspace, + cudaStream_t stream = nullptr) + { + semaphore = static_cast(workspace); + // Zero-initialize entire workspace + if (semaphore) + { + size_t workspace_bytes = get_workspace_size(); + + CUTLASS_TRACE_HOST(" Initialize " << workspace_bytes << " workspace bytes"); + + cudaError_t result = cudaMemsetAsync( + semaphore, + 0, + workspace_bytes, + stream); + + if (result != cudaSuccess) { + CUTLASS_TRACE_HOST(" cudaMemsetAsync() returned error " << cudaGetErrorString(result)); + return Status::kErrorInternal; + } + } + + return Status::kSuccess; + } + + + /// Returns the GEMM volume in thread block tiles + GemmCoord get_tiled_shape() const + { + return grid_tiled_shape; + } + + + /// Returns the total number of thread blocks to launch + int get_grid_blocks() const + { + dim3 grid_dims = get_grid_dims(); + return grid_dims.x * grid_dims.y * grid_dims.z; + } + + + /// Returns the grid extents in thread blocks to launch + dim3 get_grid_dims() const + { + return ThreadblockSwizzle().get_grid_shape(grid_tiled_shape); + } + +private: + CUTLASS_HOST_DEVICE + void init_grid_tiled_shape() { + // Get GEMM volume in thread block tiles + grid_tiled_shape = ThreadblockSwizzle::get_tiled_shape( + problem_size, + {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK}, + batch_count); + + swizzle_log_tile = ThreadblockSwizzle::get_log_tile(grid_tiled_shape); + + // Determine extent of K-dimension assigned to each block + gemm_k_size = problem_size.k(); + + if (mode == GemmUniversalMode::kGemm || mode == GemmUniversalMode::kGemmSplitKParallel) + { + static const uint32_t CACHELINE_BYTES = 128; + static const size_t element_bytes_a = sizeof(ElementA); + static const size_t element_bytes_b = sizeof(ElementB); + static const size_t cacheline_elements_a = CACHELINE_BYTES / element_bytes_a; + static const size_t cacheline_elements_b = CACHELINE_BYTES / element_bytes_b; + + const bool cacheline_alignment_needed = + 
util::is_continous_k_aligned(problem_size, cacheline_elements_a, cacheline_elements_b); + + int const kAlignK = const_max( + const_max(128 / sizeof_bits::value, 128 / sizeof_bits::value), + cacheline_alignment_needed ? const_max(cacheline_elements_a, cacheline_elements_b) : 1); + + gemm_k_size = round_up(ceil_div(problem_size.k(), batch_count), kAlignK); + if (gemm_k_size) { + grid_tiled_shape.k() = ceil_div(problem_size.k(), gemm_k_size); + } + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/rank_2k_grouped.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/rank_2k_grouped.h new file mode 100644 index 0000000000000000000000000000000000000000..55955d43319c8b3c6bb5d00eef1c961e9f054ac7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/rank_2k_grouped.h @@ -0,0 +1,704 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief Grouped Rank2K kernel. 
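+ Each problem in the group performs a rank-2k update, (A x B^T) + (B x A^T), scaled by the epilogue output operator; in Hermitian (HER2K) mode, conj(alpha) is applied to the second product.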
+*/ + +#pragma once + +#include "cutlass/blas3.h" +#include "cutlass/cutlass.h" +#include "cutlass/fast_math.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/complex.h" + +#include "cutlass/layout/matrix.h" +#include "cutlass/trace.h" +#include "cutlass/gemm/kernel/rank_2k_transpose_operands.h" +#include "cutlass/gemm/kernel/rank_2k_grouped_problem_visitor.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename Mma1_, ///! Threadblock-scoped matrix multiply-accumulate (A*B^T) + typename Mma2_, ///! Threadblock-scoped matrix multiply-accumulate (B*A^T) + typename Epilogue_, ///! Epilogue + typename ThreadblockSwizzle_, ///! Threadblock swizzling function + ComplexTransform OriginalTransformA_, ///! Public-facing transformation on A + ComplexTransform OriginalTransformB_, ///! Public-facing transformation on B + FillMode FillModeC_, ///! Fill Mode for C (kLower or kUpper) + BlasMode BlasMode_, ///! Blas3 computation mode + GroupScheduleMode GroupScheduleMode_, ///! Type of scheduling to perform + bool Transposed = false +> +struct Rank2KGrouped { +public: + + using Mma1 = Mma1_; + using Mma2 = Mma2_; + + static_assert(platform::is_same::value && + platform::is_same::value, + "Kernel-level grouped Rank2K requires that LayoutC be row major."); + + // Define generic Mma for usecases that use Kernel::Mma + using Mma = Mma1_; + + using Epilogue = Epilogue_; + using EpilogueOutputOp = typename Epilogue::OutputOp; + using ThreadblockSwizzle = ThreadblockSwizzle_; + static GroupScheduleMode const kGroupScheduleMode = GroupScheduleMode_; + static bool const kTransposed = Transposed; + + // Public-facing type definitions related to operand element type, layout, and complex conjugate + // operation. Must interact with the 'kTransposed' notion to reflect the original layout, + // fill mode, etc. passed in. + // + // Recall that a Rank2K operation performs (A x BT) + (B x AT) + // This is performed via: + // Mma1 = (A x BT) + // Mma2 = (B x AT) + // + // However, if C needs to be transposed, then this is changed to the following: + // Mma1 = (B x AT) + // Mma2 = (A x BT) + // + // The transformation above is achieved by swapping the Layouts/Elements/Transforms/etc. + // of A and B as they are passed into the instantiations of Mma1 and Mma2. + // + // Now, given access to only Mma1 and Mma2, as well as whether a transposition has occurred, + // we wish to retrieve the original Layouts/Elements/etc. for A and B that were passed into + // the device-level call. + // + // The logic to do this (which is made clearer by referencing the above instantiations) is as follows: + // LayoutA = kTransposed ? Mma2::LayoutA : Mma1::LayoutA + // LayoutB = kTransposed ? 
Mma1::LayoutA : Mma2::LayoutA + // + // We achieve this swapping by passing Mma1::*A and Mma2::*B to Rank2KMapArguments: + using MapArgumentsA = kernel::detail::Rank2KMapArguments< + typename Mma1::IteratorA::Element, + typename Mma1::IteratorA::Layout, + Mma1::kTransformA, + Mma1::IteratorA::AccessType::kElements, + typename Mma2::IteratorA::Element, + typename Mma2::IteratorA::Layout, + Mma2::kTransformA, + Mma2::IteratorA::AccessType::kElements, + typename Mma1::LayoutC, + FillModeC_, + kTransposed + >; + + using ElementA = typename MapArgumentsA::ElementA; + using LayoutA = typename MapArgumentsA::LayoutA; + static int const kAlignmentA = MapArgumentsA::kAlignmentA; + + using MapArgumentsB = kernel::detail::Rank2KMapArguments< + typename Mma2::IteratorA::Element, + typename Mma2::IteratorA::Layout, + Mma2::kTransformA, + Mma2::IteratorA::AccessType::kElements, + typename Mma1::IteratorA::Element, + typename Mma1::IteratorA::Layout, + Mma1::kTransformA, + Mma1::IteratorA::AccessType::kElements, + typename Mma2::LayoutC, + FillModeC_, + kTransposed + >; + + using ElementB = typename MapArgumentsB::ElementA; + using LayoutB = typename MapArgumentsB::LayoutA; + static int const kAlignmentB = MapArgumentsB::kAlignmentA; + + // Use the user-provided TransformA and TransformB, rather than those + // resulting from MapArguments, because Mma1 and Mma2 may have different + // complex transforms than those passed in by the user. + // (See kernel/rank_2k_complex.h for an example of this) + static cutlass::ComplexTransform const kTransformA = OriginalTransformA_; + static cutlass::ComplexTransform const kTransformB = OriginalTransformB_; + + using ElementC = typename Epilogue::OutputTileIterator::Element; + using LayoutC = typename MapArgumentsA::LayoutC; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + static FillMode const kFillModeC = MapArgumentsA::kFillModeC; + + // Common type definitions for Mma1 and Mma2 + using Operator = typename Mma1::Operator; + using OperatorClass = typename Mma1::Operator::OperatorClass; + using ThreadblockShape = typename Mma1::Shape; + using WarpShape = typename Mma1::Operator::Shape; + using InstructionShape = typename Mma1::Policy::Operator::InstructionShape; + using ArchTag = typename Mma1::ArchTag; + + static int const kStages = Mma1::kStages; + static BlasMode const kBlasMode = BlasMode_; + +private: + static FillMode const kInternalFillModeC = FillModeC_; + +public: + + /// Warp count (concept: GemmShape) + using WarpCount = typename Mma1::WarpCount; + static int const kThreadCount = 32 * WarpCount::kCount; + + using ProblemVisitor = Rank2KGroupedProblemVisitor< + ThreadblockShape, + kGroupScheduleMode, + kThreadCount, + kThreadCount, + kInternalFillModeC>; + + // + // Structures + // + + /// Argument structure + struct Arguments { + + // + // Data members + // + + GemmUniversalMode mode; + GemmCoord *problem_sizes; + int problem_count; + int threadblock_count; + + typename EpilogueOutputOp::Params epilogue; + + ElementA ** ptr_A; + ElementB ** ptr_B; + ElementC ** ptr_C; + ElementC ** ptr_D; + + typename LayoutA::Stride::LongIndex *lda; + typename LayoutB::Stride::LongIndex *ldb; + typename LayoutC::Stride::LongIndex *ldc; + typename LayoutC::Stride::LongIndex *ldd; + + // Only used by device-level operator + GemmCoord *host_problem_sizes; + + // + // Methods + // + + /// Default ctor + CUTLASS_HOST_DEVICE + Arguments(): + mode(GemmUniversalMode::kGemm), + problem_count(0), + threadblock_count(0), + ptr_A(nullptr), + 
ptr_B(nullptr), + ptr_C(nullptr), + ptr_D(nullptr), + lda(nullptr), + ldb(nullptr), + ldc(nullptr), + ldd(nullptr), + host_problem_sizes(nullptr) + { + + } + + /// Ctor + CUTLASS_HOST_DEVICE + Arguments( + GemmUniversalMode mode, + GemmCoord *problem_sizes, + int problem_count, + int threadblock_count, + typename EpilogueOutputOp::Params epilogue, + ElementA ** ptr_A, + ElementB ** ptr_B, + ElementC ** ptr_C, + ElementC ** ptr_D, + typename LayoutA::Stride::LongIndex *lda, + typename LayoutB::Stride::LongIndex *ldb, + typename LayoutC::Stride::LongIndex *ldc, + typename LayoutC::Stride::LongIndex *ldd, + GemmCoord *host_problem_sizes=nullptr + ): + mode(mode), + problem_sizes(problem_sizes), + problem_count(problem_count), + threadblock_count(threadblock_count), + epilogue(epilogue), + ptr_A(ptr_A), + ptr_B(ptr_B), + ptr_C(ptr_C), + ptr_D(ptr_D), + lda(lda), + ldb(ldb), + ldc(ldc), + ldd(ldd), + host_problem_sizes(host_problem_sizes) + { + + } + + }; + + // + // Structure for precomputing values in host memory and passing to kernels + // + + /// Parameters structure + struct Params { + + typename ProblemVisitor::Params problem_visitor; + int threadblock_count; + + typename EpilogueOutputOp::Params output_op; + + GemmUniversalMode mode; + int batch_count; + + ElementA ** ptr_A; + ElementB ** ptr_B; + ElementC ** ptr_C; + ElementC ** ptr_D; + + typename LayoutA::Stride::LongIndex *lda; + typename LayoutB::Stride::LongIndex *ldb; + typename LayoutC::Stride::LongIndex *ldc; + typename LayoutC::Stride::LongIndex *ldd; + + + // + // Methods + // + + CUTLASS_HOST_DEVICE + Params(): + mode(cutlass::gemm::GemmUniversalMode::kGemm), + ptr_A(nullptr), + ptr_B(nullptr), + ptr_C(nullptr), + ptr_D(nullptr), + lda(nullptr), + ldb(nullptr), + ldc(nullptr), + ldd(nullptr) + { } + + CUTLASS_HOST_DEVICE + Params(Arguments const &args, void *workspace = nullptr, int tile_count = 0): + problem_visitor(args.problem_sizes, args.problem_count, workspace, tile_count), + threadblock_count(args.threadblock_count), + output_op(args.epilogue), + ptr_A(args.ptr_A), + ptr_B(args.ptr_B), + ptr_C(args.ptr_C), + ptr_D(args.ptr_D), + lda(args.lda), + ldb(args.ldb), + ldc(args.ldc), + ldd(args.ldd) + { + + } + + CUTLASS_HOST_DEVICE + void update( + Arguments const &args, + void *workspace = nullptr, + int tile_count = 0) { + + problem_visitor = typename ProblemVisitor::Params(args.problem_sizes, args.problem_count, workspace, tile_count); + threadblock_count = args.threadblock_count; + output_op = args.output_op; + ptr_A = args.ptr_A; + ptr_B = args.ptr_B; + ptr_C = args.ptr_C; + ptr_D = args.ptr_D; + } + }; + + /// Shared memory storage structure + struct SharedStorage { + union { + typename Mma1::SharedStorage mma1_main_loop; + typename Mma2::SharedStorage mma2_main_loop; + typename Epilogue::SharedStorage epilogue; + } kernel; + + // ProblemVisitor shared storage can't be overlapped with others + typename ProblemVisitor::SharedStorage problem_visitor; + }; + +public: + + // + // Methods + // + + CUTLASS_DEVICE + Rank2KGrouped() { } + + /// Determines whether kernel satisfies alignment + static Status can_implement(cutlass::gemm::GemmCoord const & problem_size) { + return Status::kSuccess; + } + + static Status can_implement(Arguments const &args) { + return Status::kSuccess; + } + + /// Executes one GEMM + CUTLASS_DEVICE + void operator()(Params const ¶ms, SharedStorage &shared_storage) { + + // + // Problem visitor. 
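+ // Each threadblock persistently iterates over the tiles scheduled to it.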
+ // + + ProblemVisitor problem_visitor( + params.problem_visitor, + shared_storage.problem_visitor, + blockIdx.x); + + // Outer 'persistent' loop to iterate over tiles + while (problem_visitor.next_tile()) { + + GemmCoord problem_size = problem_visitor.problem_size(); + int32_t problem_idx = problem_visitor.problem_index(); + int32_t threadblock_idx = int32_t(problem_visitor.threadblock_idx()); + + GemmCoord grid_shape = problem_visitor.grid_shape(problem_size); + + cutlass::gemm::GemmCoord threadblock_tile_offset = problem_visitor.threadblock_offset(threadblock_idx); + + // + // Perform checks to determine whether the results of this threadblock will be needed. + // An example of an unneeded threadblock is one that is assigned to compute in the upper + // portion of a Rank2K kernel filled with mode kLower. + // + // TODO: Consider pushing these checks into ProblemVisitor to avoid spuriously + // returning from `next_tile()`. + // + + // Early exit if threadblock is out of range + if (grid_shape.m() <= threadblock_tile_offset.m() || + grid_shape.n() <= threadblock_tile_offset.n()) { + // Next tile + problem_visitor.advance(gridDim.x); + continue; + } + + // Skip this tile if Fill Mode is Lower and + // if the entire tile is above the main diagonal (bottom-left corner is at or above the diagonal) + if (kInternalFillModeC == cutlass::FillMode::kLower && + (threadblock_tile_offset.m() + 1) * Mma1::Shape::kM <= threadblock_tile_offset.n() * Mma1::Shape::kN) { + // Next tile + problem_visitor.advance(gridDim.x); + continue; + } + + // Skip this tile if Fill Mode is Upper and + // if the entire tile is below the main diagonal (top-right corner is at or below the diagonal) + if (kInternalFillModeC == cutlass::FillMode::kUpper && + threadblock_tile_offset.m() * Mma1::Shape::kM >= (threadblock_tile_offset.n() + 1) * Mma1::Shape::kN) { + // Next tile + problem_visitor.advance(gridDim.x); + continue; + } + + bool tile_on_diagonal = false; + // Mark tiles that are being crossed by the main diagonal + // (top-right and bottom-left corners are on either side of the diagonal) + if ((threadblock_tile_offset.m() + 1) * Mma1::Shape::kM > threadblock_tile_offset.n() * Mma1::Shape::kN + && threadblock_tile_offset.m() * Mma1::Shape::kM < (threadblock_tile_offset.n() + 1) * Mma1::Shape::kN) { + tile_on_diagonal = true; + } + + int offset_k = 0; + int problem_size_k = problem_size.k(); + + // + // Fetch pointers based on mode. + // + if (params.mode == GemmUniversalMode::kGemm || + params.mode == GemmUniversalMode::kGemmSplitKParallel) { + + if (threadblock_tile_offset.k() + 1 < grid_shape.k()) { + problem_size_k = (threadblock_tile_offset.k() + 1) * problem_size.k(); + } + + offset_k = threadblock_tile_offset.k() * problem_size.k(); + } + + ElementA *ptr_A = reinterpret_cast((kTransposed ? params.ptr_B[problem_idx] : params.ptr_A[problem_idx])); + typename LayoutA::Stride::LongIndex ldm_A = (kTransposed ? params.ldb[problem_idx] : params.lda[problem_idx]); + + ElementB *ptr_B = reinterpret_cast((kTransposed ? params.ptr_A[problem_idx] : params.ptr_B[problem_idx])); + typename LayoutB::Stride::LongIndex ldm_B = (kTransposed ? 
params.lda[problem_idx] : params.ldb[problem_idx]); + + // Compute initial location in logical coordinates + cutlass::MatrixCoord tb_offset_MxK{ + threadblock_tile_offset.m() * Mma1::Shape::kM, + offset_k, + }; + + cutlass::MatrixCoord tb_offset_KxN{ + offset_k, + threadblock_tile_offset.n() * Mma1::Shape::kN + }; + + // Assume identity swizzle + MatrixCoord tb_offset( + threadblock_tile_offset.m() * Mma1::Shape::kM, + threadblock_tile_offset.n() * Mma1::Shape::kN + ); + + // Compute position within threadblock + int thread_idx = threadIdx.x; + + // Construct iterators to A and B operands for Mma1 + typename Mma1::IteratorA iterator_A( + Mma1::IteratorA::Params(ldm_A), + ptr_A, + {problem_size.m(), problem_size_k}, + thread_idx, + tb_offset_MxK); + + typename Mma1::IteratorB iterator_BT( + Mma1::IteratorB::Params(ldm_B), + ptr_B, + {problem_size_k, problem_size.n()}, + thread_idx, + tb_offset_KxN); + + // Construct iterators to A and B operands for Mma2 + typename Mma2::IteratorA iterator_B( + Mma2::IteratorA::Params(ldm_B), + ptr_B, + {problem_size.m(), problem_size_k}, + thread_idx, + tb_offset_MxK); + + typename Mma2::IteratorB iterator_AT( + Mma2::IteratorB::Params(ldm_A), + ptr_A, + {problem_size_k, problem_size.n()}, + thread_idx, + tb_offset_KxN); + + // Broadcast the warp_id computed by lane 0 to ensure dependent code + // is compiled as warp-uniform. + int warp_idx = canonical_warp_idx_sync(); + + int lane_idx = threadIdx.x % 32; + + // + // Main loop + // + + // Construct thread-scoped matrix multiply for Mma1 (A x BT) + Mma1 mma1(shared_storage.kernel.mma1_main_loop, thread_idx, warp_idx, lane_idx); + + // Construct thread-scoped matrix multiply for Mma2 (B x AT) + Mma2 mma2(shared_storage.kernel.mma2_main_loop, thread_idx, warp_idx, lane_idx); + + typename Mma1::FragmentC accumulators; + + accumulators.clear(); + + // Compute threadblock-scoped matrix multiply-add + int gemm_k_iterations = (problem_size_k - offset_k + Mma1::Shape::kK - 1) / Mma1::Shape::kK; + + // Wait for all threads to finish their epilogue phases from the previous tile. + __syncthreads(); + + // Compute threadblock-scoped matrix multiply-add (A x BT) + mma1( + gemm_k_iterations, + accumulators, + iterator_A, + iterator_BT, + accumulators); + + // HER2K kernel needs Alpha to be complex and is conj(Alpha) is applied to the second HERK. + if (kBlasMode == BlasMode::kHermitian) { + + // + // Epilogue + // + + EpilogueOutputOp output_op(params.output_op); + + int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * grid_shape.m(); + + ElementC *ptr_C = static_cast(params.ptr_C[problem_idx]); + ElementC *ptr_D = static_cast(params.ptr_D[problem_idx]); + + // If TB not on diagonal, FillMode doesn't apply. + FillMode kFillModeTB = tile_on_diagonal ? kInternalFillModeC : FillMode::kNone; + + // Tile iterator loading from source tensor. + typename Epilogue::OutputTileIterator iterator_C( + Epilogue::OutputTileIterator::Params(params.ldc[problem_idx]), + ptr_C, + problem_size.mn(), + thread_idx, + tb_offset, + kFillModeTB + ); + + // Tile iterator writing to destination tensor. + typename Epilogue::OutputTileIterator iterator_D( + Epilogue::OutputTileIterator::Params(params.ldd[problem_idx]), + ptr_D, + problem_size.mn(), + thread_idx, + tb_offset, + kFillModeTB + ); + + Epilogue epilogue( + shared_storage.kernel.epilogue, + thread_idx, + warp_idx, + lane_idx); + + // Execute the epilogue operator to update the destination tensor. 
+ epilogue( + output_op, + iterator_D, + accumulators, + iterator_C); + + __syncthreads(); + + accumulators.clear(); + } + + // Compute threadblock-scoped matrix multiply-add (B x AT) + mma2( + gemm_k_iterations, + accumulators, + iterator_B, + iterator_AT, + accumulators); + + // + // Epilogue + // + + EpilogueOutputOp output_op(params.output_op); + + /* Needed for HER2K where the second HERK is multiplied by conj(alpha) */ + typename EpilogueOutputOp::Params second_her2k_params(conj(params.output_op.alpha), 1); + EpilogueOutputOp output_op_her2k(second_her2k_params); + + // + // Masked tile iterators constructed from members + // + + int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * grid_shape.m(); + + ElementC *ptr_C = static_cast(params.ptr_C[problem_idx]); + + // HER2K kernel needs Alpha to be complex and is conj(Alpha) is applied to the second HERK. + if (kBlasMode == BlasMode::kHermitian) { + ptr_C = static_cast(params.ptr_D[problem_idx]); + } + + ElementC *ptr_D = static_cast(params.ptr_D[problem_idx]); + + // If TB not on diagonal, FillMode doesn't apply. + FillMode kFillModeTB = tile_on_diagonal ? kInternalFillModeC : FillMode::kNone; + + // Tile iterator loading from source tensor. + typename Epilogue::OutputTileIterator iterator_C( + Epilogue::OutputTileIterator::Params(params.ldc[problem_idx]), + ptr_C, + problem_size.mn(), + thread_idx, + tb_offset, + kFillModeTB + ); + + // Tile iterator writing to destination tensor. + typename Epilogue::OutputTileIterator iterator_D( + Epilogue::OutputTileIterator::Params(params.ldd[problem_idx]), + ptr_D, + problem_size.mn(), + thread_idx, + tb_offset, + kFillModeTB + ); + + Epilogue epilogue( + shared_storage.kernel.epilogue, + thread_idx, + warp_idx, + lane_idx); + + // Execute the epilogue operator to update the destination tensor. + if (kBlasMode == BlasMode::kSymmetric) { + epilogue( + output_op, + iterator_D, + accumulators, + iterator_C); + } else { + epilogue( + output_op_her2k, + iterator_D, + accumulators, + iterator_C); + } + + // Next tile + problem_visitor.advance(gridDim.x); + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/rank_2k_grouped_problem_visitor.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/rank_2k_grouped_problem_visitor.h new file mode 100644 index 0000000000000000000000000000000000000000..92cc2a732c26d3c91cf05d93cb8a75eee1e26cf1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/rank_2k_grouped_problem_visitor.h @@ -0,0 +1,376 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief Problem visitor for grouped Rank2K operations. + + This problem visitor is specialized for Rank2K operations, for which matrix C is upper/lower + triangular. Using a problem visitor designed for GEMMs for Rank2K problems is inefficient + because threadblocks will be frequently assigned to tiles that exit early (e.g., due to + being assigned to a tile in the upper-triangular portion of a lower-triangular problem). + This can lead to load imbalance among threadblocks, as the GEMM-based scheduler + assigns all threadblocks to nearly the same number of tiles, regardless of whether + those tiles exit early. + + Consider an example of a group of four Rank2Ks with matrix C consisting of a grid of 2x2 tiles. + Consider a grid of 8 threadblocks. The default GEMM scheduler will assign threadblocks to + tiles in the following order: + Rank2K 0 Rank2K 1 Rank2K 2 Rank2K 3 + 0 1 4 5 0 1 4 5 + 2 3 6 7 2 3 6 7 + Assuming that the problems are lower triangular, blocks 1 and 5 are continuously assigned + to inactive tiles. + + This problem visitor aims to assign threadblocks to only those tiles which are in the + upper/lower triangular portion of a given problem. Using the example above, the resulting + assignment would be: + Rank2K 0 Rank2K 1 Rank2K 2 Rank2K 3 + 0 - 3 - 6 - 1 - + 1 2 4 5 7 0 2 3 + + Achieving the schedule above requires a mapping from threadblock ID to tile coordinates (i, j). + We will illustrate this by mapping on a lower-triangular matrix with a 3x3 grid. We first + calculate row and column indices assuming one-indexed rows, tiles, and threadblock IDs, and + then subtract one to convert to zero-indexed. + Col 1 Col 2 Col 3 + ---------------------- + Row 1 | 1 - - + Row 2 | 2 3 - + Row 3 | 4 5 6 + + We next outline this mapping, borrowing from: https://stackoverflow.com/a/40954159 + + Calculating row i given threadblock ID t + ---------------------------------------- + For a given row i, all threadblock IDs t in that row satisfy the following: + t <= 1 + 2 + 3 + ... + (i-1) + i + + The closed-form equation for the right-hand side is: i(i+1)/2. 
+  Using this, we can solve for i given t:
+        t <= i(i+1)/2
+       2t <= i^2 + i
+       2t <= i^2 + i + 0.25 - 0.25
+       2t + 0.25 <= i^2 + i + 0.25
+       2t + 0.25 <= (i + 0.5)^2
+       sqrt(2t + 0.25) - 0.5 <= i
+
+  To account for fractional values, we set:
+       i = ceil(sqrt(2t + 0.25) - 0.5)
+
+  To turn this into a zero-indexed row and work with zero-indexed t, we perform:
+       i = ceil(sqrt(2(t+1) + 0.25) - 0.5) - 1
+         = ceil(sqrt(2t + 2.25) - 0.5) - 1
+
+  Calculating column j given threadblock ID t and row i
+  -----------------------------------------------------
+  For a given row i, all threadblock IDs t in that row also satisfy the following:
+       t > 1 + 2 + 3 + ... + (i-2) + (i-1)
+   --> t > i(i-1)/2
+
+  Threadblock IDs within a given row are sequential, so the one-indexed column ID
+  for one-indexed threadblock ID t and row i is:
+       j = t - (i(i-1)/2)
+
+  The zero-indexed version becomes:
+       j = (t+1) - (i(i+1)/2) - 1
+         = t - (i(i+1)/2)
+
+  Accounting for non-square grids
+  -------------------------------
+  Though the overall output problem size for Rank2K problems is guaranteed to be square, the
+  grids used in computing may not be square due to using non-square threadblock shapes. For
+  example, a threadblock shape of 64x32 operating on a problem of output size 128x128 would
+  result in a grid of 2x4 tiles.
+
+  This case can be handled by noting that the output resembles a square grid of 2x2 "macro tiles"
+  each of which contains 2 "true tiles." We can thus first map a threadblock ID to its "macro tile"
+  using the equations above, and then map it to the "true tile" within its "macro tile." In the example
+  of a 2x4 grid, this mapping would look as follows:
+       "Macro grid"       "True grid"
+      {0, 1}   -           0  1  -  -
+      {2, 3} {4, 5}        2  3  4  5
+
+  A zero-indexed threadblock ID t is mapped to its "macro tile ID" t_macro as:
+       t_macro = t // r
+  Where r is the ratio of the maximum dimension of the grid to the minimum dimension of the grid
+  (i.e., r = 4 / 2 = 2 in the previous example).
+
+  One uses t_macro and the calculations above to find the row and column in the square matrix to
+  obtain i_macro and j_macro (zero-indexed). The mapping from (i_macro, j_macro) --> (i, j)
+  is simply the following:
+       if (ThreadblockShape::M > ThreadblockShape::N):
+           r = ThreadblockShape::M / ThreadblockShape::N
+           i = i_macro
+           j = (j_macro * r) + (t % r)
+       elif (ThreadblockShape::M < ThreadblockShape::N):
+           r = ThreadblockShape::N / ThreadblockShape::M
+           i = (i_macro * r) + (t % r)
+           j = j_macro
+       else:
+           i = i_macro
+           j = j_macro
+
+  Handling cases with grid dimensions that aren't multiples of each other
+  -----------------------------------------------------------------------
+  Even though threadblock shapes M and N are typically multiples of one another, the grid
+  for a given problem may not have dimensions of the same ratio as that of the threadblock.
+  For example, a problem of size 132x132 using a threadblock of shape 64x32 will result
+  in a grid of 3x5 tiles. In this case, there is not an integer number of "true tiles"
+  per "macro tile."
+
+  When this scenario arises, we simply pad the larger dimension of the grid such that
+  there are an integer number of "true tiles" per "macro tile." Thus, the 3x5 grid in
+  the example above will be treated as a 3x6 grid. Row and column positions for each
+  tile are calculated as above. Any threadblocks that map to tiles that are outside the
+  problem range or upper/lower triangular portion (e.g., (2, 5)) will exit early from
+  this problem and may proceed to the next problem in the group.
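+  As a quick sanity check of the closed-form mapping above, the following is a minimal
+  standalone C++ sketch (illustrative only; it is not part of this header, and the names
+  lower_triangular_tile, row, and col are invented for the example). It converts a
+  zero-indexed threadblock ID into (row, column) tile coordinates for a square,
+  lower-triangular grid and prints one worked value.
+
+      #include <cmath>
+      #include <cstdio>
+      #include <utility>
+
+      // Zero-indexed (row, col) of threadblock ID t in a square lower-triangular tile grid,
+      // following i = ceil(sqrt(2t + 2.25) - 0.5) - 1 and j = t - i(i+1)/2 derived above.
+      inline std::pair<int, int> lower_triangular_tile(int t) {
+        int i = static_cast<int>(std::ceil(std::sqrt(2.0 * t + 2.25) - 0.5)) - 1;
+        int j = t - (i * (i + 1)) / 2;
+        return {i, j};
+      }
+
+      int main() {
+        // t = 4 maps to (2, 1): one-indexed threadblock 5 sits at Row 3, Col 2
+        // in the 3x3 lower-triangular table shown earlier.
+        auto [row, col] = lower_triangular_tile(4);
+        std::printf("t=4 -> (%d, %d)\n", row, col);
+        return 0;
+      }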
+ + Handling upper-triangular matrices + ---------------------------------- + The only modification needed for upper-triangular matrices is to swap i_macro and j_macro + in the calculations above. +*/ + +#pragma once + +#include "cutlass/blas3.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_coord.h" + +#include "cutlass/gemm/kernel/grouped_problem_visitor.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +namespace detail { +///////////////////////////////////////////////////////////////////////////////////////////////// +// +// Helpers for calculating offsets for Rank2K problem visitor. These helpers specifically pertain +// to the conversion from "macro tiles" to "true tiles" in the description above. +// +template < + typename ThreadblockShape, + typename Enable = void +> +struct Rank2KGroupedProblemVisitorOffsetHelper; + +// Partial specialization for the case where threadblock shape M > threadblock shape N +template < + typename ThreadblockShape +> +struct Rank2KGroupedProblemVisitorOffsetHelper< + ThreadblockShape, + typename platform::enable_if< (ThreadblockShape::kM > ThreadblockShape::kN) >::type +> { + static_assert(ThreadblockShape::kM % ThreadblockShape::kN == 0, + "Rank2KGroupedProblemVisitor with threadblock shape M > threadblock shape N " + "requires that threadblock shape M be a multiple of threadblock shape N."); + + static int32_t const kThreadblockSkewRatio = ThreadblockShape::kM / ThreadblockShape::kN; + + CUTLASS_HOST_DEVICE + static int32_t min_dim(cutlass::gemm::GemmCoord grid) { + return grid.m(); + } + + CUTLASS_HOST_DEVICE + static int32_t macro_row_to_row(int32_t row, int32_t threadblock_id) { + return row; + } + + CUTLASS_HOST_DEVICE + static int32_t macro_col_to_col(int32_t col, int32_t threadblock_id) { + return (col * kThreadblockSkewRatio) + (threadblock_id % kThreadblockSkewRatio); + } +}; + +// Partial specialization for the case where threadblock shape M < threadblock shape N +template < + typename ThreadblockShape +> +struct Rank2KGroupedProblemVisitorOffsetHelper< + ThreadblockShape, + typename platform::enable_if< (ThreadblockShape::kM < ThreadblockShape::kN) >::type +> { + + static_assert(ThreadblockShape::kN % ThreadblockShape::kM == 0, + "Rank2KGroupedProblemVisitor with threadblock shape M < threadblock shape N " + "requires that threadblock shape N be a multiple of threadblock shape M."); + + static int32_t const kThreadblockSkewRatio = ThreadblockShape::kN / ThreadblockShape::kM; + + CUTLASS_HOST_DEVICE + static int32_t min_dim(cutlass::gemm::GemmCoord grid) { + return grid.n(); + } + + CUTLASS_HOST_DEVICE + static int32_t macro_row_to_row(int32_t row, int32_t threadblock_id) { + return (row * kThreadblockSkewRatio) + (threadblock_id % kThreadblockSkewRatio); + } + + CUTLASS_HOST_DEVICE + static int32_t macro_col_to_col(int32_t col, int32_t threadblock_id) { + return col; + } +}; + +// Partial specialization for the case where threadblock shape M == threadblock shape N +// In this case, macro tiles are equivalent to true tiles, so the conversions are +// identity functions. 
+template < + typename ThreadblockShape +> +struct Rank2KGroupedProblemVisitorOffsetHelper< + ThreadblockShape, + typename platform::enable_if< (ThreadblockShape::kM == ThreadblockShape::kN) >::type +> { + + static int32_t const kThreadblockSkewRatio = 1; + + CUTLASS_HOST_DEVICE + static int32_t min_dim(cutlass::gemm::GemmCoord grid) { + return grid.m(); + } + + CUTLASS_HOST_DEVICE + static int32_t macro_row_to_row(int32_t row, int32_t threadblock_id) { + return row; + } + + CUTLASS_HOST_DEVICE + static int32_t macro_col_to_col(int32_t col, int32_t threadblock_id) { + return col; + } +}; + +// Helper for correctly representing problem sizes in grouped kernels +template +struct Rank2KGroupedProblemSizeHelper { + using OffsetHelper = Rank2KGroupedProblemVisitorOffsetHelper; + + CUTLASS_HOST_DEVICE + static cutlass::gemm::GemmCoord grid_shape(const cutlass::gemm::GemmCoord& problem) { + return cutlass::gemm::GemmCoord( + ((problem.m() - 1 + ThreadblockShape::kM) / ThreadblockShape::kM), + ((problem.n() - 1 + ThreadblockShape::kN) / ThreadblockShape::kN), + 1); + } + + CUTLASS_HOST_DEVICE + static int32_t tile_count(const cutlass::gemm::GemmCoord& grid) { + // Return the number of tiles at or below the diagonal (or at and above + // for mode kUpper). We do this by first calculating this value assuming + // we have a square matrix of tiles of size `dim x dim` where `dim` is the + // minimum among {grid.m(), grid.n()}. We then multiply the resulting value + // by OffsetHelper::kThreadblockSkewRatio to account for cases in which there + // are more tiles in one dimension than the other. + int32_t dim = OffsetHelper::min_dim(grid); + int32_t tiles_on_diagonal = dim; + int32_t tiles_below_diagonal = ((dim * (dim - 1)) / 2); + return (tiles_on_diagonal + tiles_below_diagonal) * OffsetHelper::kThreadblockSkewRatio; + } + + CUTLASS_HOST_DEVICE + static void possibly_transpose_problem(cutlass::gemm::GemmCoord& problem) {} +}; + +} // namespace detail + +///////////////////////////////////////////////////////////////////////////////////////////////// +// +// Default problem visitor for fill modes kUpper and kLower. 
+// +template +struct Rank2KGroupedProblemVisitor : public GroupedProblemVisitor< + detail::Rank2KGroupedProblemSizeHelper, + ThreadblockShape, + GroupScheduleMode_, + PrefetchTileCount, + ThreadCount> { + + static cutlass::FillMode const kFillModeC = FillModeC; + + static_assert(kFillModeC == cutlass::FillMode::kLower || kFillModeC == cutlass::FillMode::kUpper, + "Default Rank2KGroupedProblemVisitor requires fill mode of kLower or kUpper."); + + using ProblemSizeHelper = detail::Rank2KGroupedProblemSizeHelper; + using Base = GroupedProblemVisitor; + using OffsetHelper = typename ProblemSizeHelper::OffsetHelper; + using Params = typename Base::Params; + using SharedStorage = typename Base::SharedStorage; + + // + // Methods + // + CUTLASS_DEVICE + Rank2KGroupedProblemVisitor( + Params const ¶ms_, + SharedStorage &shared_storage_, + int32_t block_idx + ): Base(params_, shared_storage_, block_idx) + {} + + CUTLASS_DEVICE + cutlass::gemm::GemmCoord threadblock_offset(int32_t threadblock_id) const { + int32_t macro_id = threadblock_id / OffsetHelper::kThreadblockSkewRatio; + int32_t macro_row = ceil(cutlass::fast_sqrt((2*macro_id) + 2.25) - 0.5) - 1; + int32_t macro_col = macro_id - (((macro_row+1) * macro_row)/2); + + if (kFillModeC == cutlass::FillMode::kUpper) { + swap(macro_row, macro_col); + } + + int32_t row = OffsetHelper::macro_row_to_row(macro_row, threadblock_id); + int32_t col = OffsetHelper::macro_col_to_col(macro_col, threadblock_id); + + return cutlass::gemm::GemmCoord(row, col, 0); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/rank_2k_transpose_operands.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/rank_2k_transpose_operands.h new file mode 100644 index 0000000000000000000000000000000000000000..0837a9d8f797fa8205337571b4b161b74dfaaeca --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/rank_2k_transpose_operands.h @@ -0,0 +1,129 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! + \file + \brief Transpositions for Rank2K problems. +*/ + +#pragma once + +#include "cutlass/blas3.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/gemm.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace detail { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename ElementA_, + typename LayoutA_, + ComplexTransform TransformA, + int AlignmentA, + typename ElementB_, + typename LayoutB_, + ComplexTransform TransformB, + int AlignmentB, + typename LayoutC_, + FillMode FillModeC_, + bool Transpose +> +struct Rank2KMapArguments { + using ElementA = ElementA_; + using LayoutA = LayoutA_; + static ComplexTransform const kTransformA = TransformA; + static int const kAlignmentA = AlignmentA; + using ElementB = ElementB_; + using LayoutB = LayoutB_; + static ComplexTransform const kTransformB = TransformB; + static int const kAlignmentB = AlignmentB; + using LayoutC = LayoutC_; + static FillMode const kFillModeC = FillModeC_; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename ElementA_, + typename LayoutA_, + ComplexTransform TransformA, + int AlignmentA, + typename ElementB_, + typename LayoutB_, + ComplexTransform TransformB, + int AlignmentB, + typename LayoutC_, + FillMode FillModeC_ +> +struct Rank2KMapArguments< + ElementA_, + LayoutA_, + TransformA, + AlignmentA, + ElementB_, + LayoutB_, + TransformB, + AlignmentB, + LayoutC_, + FillModeC_, + true +> { + using ElementA = ElementB_; + using LayoutA = LayoutB_; + static ComplexTransform const kTransformA = TransformB; + static int const kAlignmentA = AlignmentB; + using ElementB = ElementA_; + using LayoutB = LayoutA_; + static ComplexTransform const kTransformB = TransformA; + static int const kAlignmentB = AlignmentA; + using LayoutC = typename layout::LayoutTranspose::type; + static FillMode const kFillModeC = InvertFillMode::mode; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} +} +} + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/rank_2k_universal.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/rank_2k_universal.h new file mode 100644 index 0000000000000000000000000000000000000000..2775710d61c643c6ae7bacad6984506f338cbf15 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/rank_2k_universal.h @@ -0,0 +1,778 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief + +*/ + +#pragma once + +#include "cutlass/blas3.h" +#include "cutlass/fast_math.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/complex.h" +#include "cutlass/semaphore.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename Mma1_, ///! Threadblock-scoped matrix multiply-accumulate (A*B^T) + typename Mma2_, ///! Threadblock-scoped matrix multiply-accumulate (B*A^T) + typename Epilogue_, ///! Epilogue + typename ThreadblockSwizzle_, ///! Threadblock swizzling function + FillMode FillModeC_, ///! Fill Mode for C (kLower or kUpper) + BlasMode BlasMode_ ///! 
Blas3 computation mode +> +struct Rank2KUniversal { +public: + + using Mma1 = Mma1_; + using Mma2 = Mma2_; + using Epilogue = Epilogue_; + using EpilogueOutputOp = typename Epilogue::OutputOp; + using ThreadblockSwizzle = ThreadblockSwizzle_; + + using ElementA = typename Mma1::IteratorA::Element; + using ElementB = typename Mma1::IteratorB::Element; + + // Mma1 (A x B^T) + using LayoutA = typename Mma1::IteratorA::Layout; + using LayoutBT = typename Mma1::IteratorB::Layout; + static ComplexTransform const kMma1TransformA = Mma1::kTransformA; + static ComplexTransform const kMma1TransformB = Mma1::kTransformB; + + // Mma2 (B x A^T) + using LayoutB = typename Mma2::IteratorA::Layout; + using LayoutAT = typename Mma2::IteratorB::Layout; + static ComplexTransform const kMma2TransformA = Mma2::kTransformA; + static ComplexTransform const kMma2TransformB = Mma2::kTransformB; + + // Common type definitions for Mma1 and Mma2 + using Operator = typename Mma1::Operator; + using OperatorClass = typename Mma1::Operator::OperatorClass; + using ThreadblockShape = typename Mma1::Shape; + using WarpShape = typename Mma1::Operator::Shape; + using InstructionShape = typename Mma1::Policy::Operator::InstructionShape; + using ArchTag = typename Mma1::ArchTag; + + static int const kStages = Mma1::kStages; + static int const kAlignmentA = Mma1::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma1::IteratorB::AccessType::kElements; + + // Output related typedefinitions + using ElementC = typename Epilogue::OutputTileIterator::Element; + using LayoutC = typename Epilogue::OutputTileIterator::Layout; + static FillMode const kFillModeC = FillModeC_; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + static BlasMode const kBlasMode = BlasMode_; + + + /// Warp count (concept: GemmShape) + using WarpCount = typename Mma1::WarpCount; + static int const kThreadCount = 32 * WarpCount::kCount; + + + // + // Structures + // + + /// Argument structure + struct Arguments { + + // + // Data members + // + + GemmUniversalMode mode; + GemmCoord problem_size; + int batch_count; + + typename EpilogueOutputOp::Params epilogue; + + void const * ptr_A; + void const * ptr_B; + void const * ptr_C; + void * ptr_D; + + int64_t batch_stride_A; + int64_t batch_stride_B; + int64_t batch_stride_C; + int64_t batch_stride_D; + + typename LayoutA::Stride::Index lda; + typename LayoutB::Stride::Index ldb; + typename LayoutC::Stride::Index ldc; + typename LayoutC::Stride::Index ldd; + + // + // Methods + // + + Arguments(): + mode(GemmUniversalMode::kGemm), + batch_count(1), + ptr_A(nullptr), ptr_B(nullptr), ptr_C(nullptr), ptr_D(nullptr) { } + + /// constructs an arguments structure + Arguments( + GemmUniversalMode mode, + GemmCoord problem_size, + int batch_count, + typename EpilogueOutputOp::Params epilogue, + void const * ptr_A, + void const * ptr_B, + void const * ptr_C, + void * ptr_D, + int64_t batch_stride_A, + int64_t batch_stride_B, + int64_t batch_stride_C, + int64_t batch_stride_D, + typename LayoutA::Stride::Index lda, + typename LayoutB::Stride::Index ldb, + typename LayoutC::Stride::Index ldc, + typename LayoutC::Stride::Index ldd + ): + mode(mode), + problem_size(problem_size), + batch_count(batch_count), + epilogue(epilogue), + ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D), + batch_stride_A(batch_stride_A), batch_stride_C(batch_stride_C), batch_stride_D(batch_stride_D), + lda(lda), ldb(ldb), ldc(ldc), ldd(ldd) { + + } + + /// Returns arguments for a the transposed 
problem + Arguments transposed_problem() const { + Arguments args(*this); + + std::swap(args.ptr_A, args.ptr_B); + std::swap(args.lda, args.ldb); + std::swap(args.batch_stride_A, args.batch_stride_B); + + return args; + } + + }; + + // + // Structure for precomputing values in host memory and passing to kernels + // + + /// Parameters structure + struct Params { + + cutlass::gemm::GemmCoord problem_size; + cutlass::gemm::GemmCoord grid_tiled_shape; + int swizzle_log_tile; + + // Mma1 Iterator A and B params + typename Mma1::IteratorA::Params params_A; + typename Mma1::IteratorB::Params params_BT; + + // Mma2 Iterator A and B params + typename Mma2::IteratorA::Params params_B; + typename Mma2::IteratorB::Params params_AT; + + typename Epilogue::OutputTileIterator::Params params_C; + typename Epilogue::OutputTileIterator::Params params_D; + + typename EpilogueOutputOp::Params output_op; + + GemmUniversalMode mode; + int batch_count; + int gemm_k_size; + + void * ptr_A; + void * ptr_B; + void * ptr_C; + void * ptr_D; + + int64_t batch_stride_A; + int64_t batch_stride_B; + int64_t batch_stride_C; + int64_t batch_stride_D; + + int *semaphore; + + // + // Methods + // + + CUTLASS_HOST_DEVICE + Params(): + swizzle_log_tile(0), + params_A(0), + params_BT(0), + params_B(0), + params_AT(0), + params_C(0), + params_D(0), + batch_count(0), + gemm_k_size(0), + mode(cutlass::gemm::GemmUniversalMode::kGemm), + ptr_A(nullptr), + ptr_B(nullptr), + ptr_C(nullptr), + ptr_D(nullptr), + batch_stride_A(0), + batch_stride_B(0), + batch_stride_C(0), + batch_stride_D(0), + semaphore(nullptr) { } + + CUTLASS_HOST_DEVICE + Params( + Arguments const &args, + cutlass::gemm::GemmCoord const & grid_tiled_shape, + int gemm_k_size, + void *workspace = nullptr + ): + problem_size(args.problem_size), + grid_tiled_shape(grid_tiled_shape), + swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)), + params_A(args.lda), + params_BT(args.ldb), + params_B(args.ldb), + params_AT(args.lda), + params_C(args.ldc), + params_D(args.ldd), + output_op(args.epilogue), + mode(args.mode), + batch_count(args.batch_count), + gemm_k_size(gemm_k_size), + ptr_A(const_cast(args.ptr_A)), + ptr_B(const_cast(args.ptr_B)), + ptr_C(const_cast(args.ptr_C)), + ptr_D(const_cast(args.ptr_D)), + batch_stride_A(args.batch_stride_A), + batch_stride_B(args.batch_stride_B), + batch_stride_C(args.batch_stride_C), + batch_stride_D(args.batch_stride_D), + semaphore(static_cast(workspace)) { + } + + CUTLASS_HOST_DEVICE + void update( + Arguments const &args, + void *workspace = nullptr) { + + ptr_A = const_cast(args.ptr_A); + ptr_B = const_cast(args.ptr_B); + ptr_C = const_cast(args.ptr_C); + ptr_D = args.ptr_D; + + output_op = args.epilogue; + + semaphore = static_cast(workspace); + } + + }; + + /// Shared memory storage structure + union SharedStorage { + typename Mma1::SharedStorage mma1_main_loop; + typename Mma2::SharedStorage mma2_main_loop; + typename Epilogue::SharedStorage epilogue; + }; + +public: + + // + // Methods + // + + CUTLASS_DEVICE + Rank2KUniversal() { } + + /// Determines whether kernel satisfies alignment + static Status can_implement( + cutlass::gemm::GemmCoord const & problem_size) { + + static int const kAlignmentA = Mma1::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma1::IteratorB::AccessType::kElements; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + + if ((problem_size.m() % kAlignmentA) || (problem_size.k() % kAlignmentA) || + (problem_size.n() % kAlignmentB) || 
(problem_size.k() % kAlignmentB) || + (problem_size.m() % kAlignmentC) || (problem_size.n() % kAlignmentC)) { + + return Status::kErrorMisalignedOperand; + } + + return Status::kSuccess; + } + + static Status can_implement(Arguments const &args) { + return can_implement(args.problem_size); + } + + /// Executes one GEMM + CUTLASS_DEVICE + void operator()(Params const ¶ms, SharedStorage &shared_storage) { + + // Compute threadblock location + ThreadblockSwizzle threadblock_swizzle; + + cutlass::gemm::GemmCoord threadblock_tile_offset = + threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + // Early exit if CTA is out of range + if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() || + params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { + return; + } + + // Early exit if Fill Mode is Lower and + // if the entire tile is above the main diagonal (bottom-left corner is at or above the diagonal) + if (kFillModeC == cutlass::FillMode::kLower && + (threadblock_tile_offset.m() + 1) * Mma1::Shape::kM <= threadblock_tile_offset.n() * Mma1::Shape::kN) { + return; + } + + // Early exit if Fill Mode is Upper and + // if the entire tile is below the main diagonal (top-right corner is at or below the diagonal) + if (kFillModeC == cutlass::FillMode::kUpper && + threadblock_tile_offset.m() * Mma1::Shape::kM >= (threadblock_tile_offset.n() + 1) * Mma1::Shape::kN) { + return; + } + + bool tile_on_diagonal = false; + // Mark tiles that are being crossed by the main diagonal + // (top-right and bottom-left corners are on either side of the diagonal) + if ((threadblock_tile_offset.m() + 1) * Mma1::Shape::kM > threadblock_tile_offset.n() * Mma1::Shape::kN + && threadblock_tile_offset.m() * Mma1::Shape::kM < (threadblock_tile_offset.n() + 1) * Mma1::Shape::kN) { + tile_on_diagonal = true; + } + + int offset_k = 0; + int problem_size_k = params.problem_size.k(); + + ElementA *ptr_A = static_cast(params.ptr_A); + ElementB *ptr_B = static_cast(params.ptr_B); + + // + // Fetch pointers based on mode. + // + if (params.mode == GemmUniversalMode::kGemm || + params.mode == GemmUniversalMode::kGemmSplitKParallel) { + + if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) { + + problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size; + } + + offset_k = threadblock_tile_offset.k() * params.gemm_k_size; + } + + __syncthreads(); + + // Compute initial location in logical coordinates + cutlass::MatrixCoord tb_offset_MxK{ + threadblock_tile_offset.m() * Mma1::Shape::kM, + offset_k, + }; + + cutlass::MatrixCoord tb_offset_KxN{ + offset_k, + threadblock_tile_offset.n() * Mma1::Shape::kN + }; + + + // Compute position within threadblock + int thread_idx = threadIdx.x; + + // Construct iterators to A and B operands for Mma1 + typename Mma1::IteratorA iterator_A( + params.params_A, + ptr_A, + {params.problem_size.m(), problem_size_k}, + thread_idx, + tb_offset_MxK); + + typename Mma1::IteratorB iterator_BT( + params.params_BT, + ptr_B, + {problem_size_k, params.problem_size.n()}, + thread_idx, + tb_offset_KxN); + + // Construct iterators to A and B operands for Mma2 + typename Mma2::IteratorA iterator_B( + params.params_B, + ptr_B, + {params.problem_size.m(), problem_size_k}, + thread_idx, + tb_offset_MxK); + + typename Mma2::IteratorB iterator_AT( + params.params_AT, + ptr_A, + {problem_size_k, params.problem_size.n()}, + thread_idx, + tb_offset_KxN); + + // Broadcast the warp_id computed by lane 0 to ensure dependent code + // is compiled as warp-uniform. 
+ int warp_idx = canonical_warp_idx_sync(); + + int lane_idx = threadIdx.x % 32; + + // + // Main loop + // + + // Construct thread-scoped matrix multiply for Mma1 (A x BT) + Mma1 mma1(shared_storage.mma1_main_loop, thread_idx, warp_idx, lane_idx); + + // Construct thread-scoped matrix multiply for Mma2 (B x AT) + Mma2 mma2(shared_storage.mma2_main_loop, thread_idx, warp_idx, lane_idx); + + typename Mma1::FragmentC accumulators; + + accumulators.clear(); + + // Compute threadblock-scoped matrix multiply-add + int gemm_k_iterations = (problem_size_k - offset_k + Mma1::Shape::kK - 1) / Mma1::Shape::kK; + + // Compute threadblock-scoped matrix multiply-add (A x BT) + mma1( + gemm_k_iterations, + accumulators, + iterator_A, + iterator_BT, + accumulators); + + // HER2K kernel needs Alpha to be complex and is conj(Alpha) is applied to the second HERK. + if (kBlasMode == BlasMode::kHermitian) { + + // + // Epilogue + // + + EpilogueOutputOp output_op(params.output_op); + + // + // Masked tile iterators constructed from members + // + + threadblock_tile_offset = + threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + //assume identity swizzle + MatrixCoord threadblock_offset( + threadblock_tile_offset.m() * Mma1::Shape::kM, + threadblock_tile_offset.n() * Mma1::Shape::kN + ); + + int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m(); + + ElementC *ptr_C = static_cast(params.ptr_C); + ElementC *ptr_D = static_cast(params.ptr_D); + + // + // Fetch pointers based on mode. + // + + // Construct the semaphore. + Semaphore semaphore(params.semaphore + block_idx, thread_idx); + + if (params.mode == GemmUniversalMode::kGemm) { + + // If performing a reduction via split-K, fetch the initial synchronization + if (params.grid_tiled_shape.k() > 1) { + + // Fetch the synchronization lock initially but do not block. + semaphore.fetch(); + + // Indicate which position in a serial reduction the output operator is currently updating + output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k()); + } + } + else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) { + ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; + } + else if (params.mode == GemmUniversalMode::kBatched) { + ptr_C += threadblock_tile_offset.k() * params.batch_stride_C; + ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; + } + else if (params.mode == GemmUniversalMode::kArray) { + ptr_C = static_cast(params.ptr_C)[threadblock_tile_offset.k()]; + ptr_D = static_cast(params.ptr_D)[threadblock_tile_offset.k()]; + } + + + // If CTA not on diagonal, FillMode doesn't apply. + FillMode kFillModeCTA = tile_on_diagonal ? kFillModeC : FillMode::kNone; + + // Tile iterator loading from source tensor. + typename Epilogue::OutputTileIterator iterator_C( + params.params_C, + ptr_C, + params.problem_size.mn(), + thread_idx, + threadblock_offset, + kFillModeCTA + ); + + // Tile iterator writing to destination tensor. + typename Epilogue::OutputTileIterator iterator_D( + params.params_D, + ptr_D, + params.problem_size.mn(), + thread_idx, + threadblock_offset, + kFillModeCTA + ); + + Epilogue epilogue( + shared_storage.epilogue, + thread_idx, + warp_idx, + lane_idx); + + // Wait on the semaphore - this latency may have been covered by iterator construction + if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { + + // For subsequent threadblocks, the source matrix is held in the 'D' tensor. 
+ if (threadblock_tile_offset.k()) { + iterator_C = iterator_D; + } + + semaphore.wait(threadblock_tile_offset.k()); + + __threadfence(); + } + + // Execute the epilogue operator to update the destination tensor. + epilogue( + output_op, + iterator_D, + accumulators, + iterator_C); + + // + // Release the semaphore + // + + if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { + + int lock = 0; + if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) { + + // The final threadblock resets the semaphore for subsequent grids. + lock = 0; + } + else { + // Otherwise, the semaphore is incremented + lock = threadblock_tile_offset.k() + 1; + } + + semaphore.release(lock); + } + + __syncthreads(); + + accumulators.clear(); + } + + // Compute threadblock-scoped matrix multiply-add (B x AT) + mma2( + gemm_k_iterations, + accumulators, + iterator_B, + iterator_AT, + accumulators); + + // + // Epilogue + // + + EpilogueOutputOp output_op(params.output_op); + + /* Needed for HER2K where the second HERK is multiplied by conj(alpha) */ + typename EpilogueOutputOp::Params second_her2k_params(conj(params.output_op.alpha), 1); + EpilogueOutputOp output_op_her2k(second_her2k_params); + + // + // Masked tile iterators constructed from members + // + + threadblock_tile_offset = + threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + //assume identity swizzle + MatrixCoord threadblock_offset( + threadblock_tile_offset.m() * Mma1::Shape::kM, + threadblock_tile_offset.n() * Mma1::Shape::kN + ); + + int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m(); + + ElementC *ptr_C = static_cast(params.ptr_C); + + // HER2K kernel needs Alpha to be complex and is conj(Alpha) is applied to the second HERK. + if (kBlasMode == BlasMode::kHermitian) { + ptr_C = static_cast(params.ptr_D); + } + + ElementC *ptr_D = static_cast(params.ptr_D); + + // + // Fetch pointers based on mode. + // + + // Construct the semaphore. + Semaphore semaphore(params.semaphore + block_idx, thread_idx); + + if (params.mode == GemmUniversalMode::kGemm) { + + // If performing a reduction via split-K, fetch the initial synchronization + if (params.grid_tiled_shape.k() > 1) { + + // Fetch the synchronization lock initially but do not block. + semaphore.fetch(); + + // Indicate which position in a serial reduction the output operator is currently updating + if (kBlasMode == BlasMode::kSymmetric) { + output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k()); + } else { + output_op_her2k.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k()); + } + } + } + else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) { + ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; + } + else if (params.mode == GemmUniversalMode::kBatched) { + ptr_C += threadblock_tile_offset.k() * params.batch_stride_C; + ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; + } + else if (params.mode == GemmUniversalMode::kArray) { + ptr_C = static_cast(params.ptr_C)[threadblock_tile_offset.k()]; + ptr_D = static_cast(params.ptr_D)[threadblock_tile_offset.k()]; + } + + + // If CTA not on diagonal, FillMode doesn't apply. + FillMode kFillModeCTA = tile_on_diagonal ? kFillModeC : FillMode::kNone; + + // Tile iterator loading from source tensor. 
+ typename Epilogue::OutputTileIterator iterator_C( + params.params_C, + ptr_C, + params.problem_size.mn(), + thread_idx, + threadblock_offset, + kFillModeCTA + ); + + // Tile iterator writing to destination tensor. + typename Epilogue::OutputTileIterator iterator_D( + params.params_D, + ptr_D, + params.problem_size.mn(), + thread_idx, + threadblock_offset, + kFillModeCTA + ); + + Epilogue epilogue( + shared_storage.epilogue, + thread_idx, + warp_idx, + lane_idx); + + // Wait on the semaphore - this latency may have been covered by iterator construction + if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { + + // For subsequent threadblocks, the source matrix is held in the 'D' tensor. + if (threadblock_tile_offset.k()) { + iterator_C = iterator_D; + } + + semaphore.wait(threadblock_tile_offset.k()); + + __threadfence(); + } + + // Execute the epilogue operator to update the destination tensor. + if (kBlasMode == BlasMode::kSymmetric) { + epilogue( + output_op, + iterator_D, + accumulators, + iterator_C); + } else { + epilogue( + output_op_her2k, + iterator_D, + accumulators, + iterator_C); + } + + // + // Release the semaphore + // + + if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { + + int lock = 0; + if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) { + + // The final threadblock resets the semaphore for subsequent grids. + lock = 0; + } + else { + // Otherwise, the semaphore is incremented + lock = threadblock_tile_offset.k() + 1; + } + + semaphore.release(lock); + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/rank_k_universal.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/rank_k_universal.h new file mode 100644 index 0000000000000000000000000000000000000000..188a4e70cfd987093a16625e31e5db2182c52ad4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/rank_k_universal.h @@ -0,0 +1,565 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief + +*/ + +#pragma once + +#include "cutlass/blas3.h" +#include "cutlass/fast_math.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/complex.h" +#include "cutlass/semaphore.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate + typename Epilogue_, ///! Epilogue + typename ThreadblockSwizzle_, ///! Threadblock swizzling function + FillMode FillModeC_ ///! Fill Mode for C (kLower or kUpper) +> +struct RankKUniversal { +public: + + using Mma = Mma_; + using Epilogue = Epilogue_; + using EpilogueOutputOp = typename Epilogue::OutputOp; + using ThreadblockSwizzle = ThreadblockSwizzle_; + + using ElementA = typename Mma::IteratorA::Element; + using LayoutA = typename Mma::IteratorA::Layout; + using ElementB = typename Mma::IteratorB::Element; + using LayoutB = typename Mma::IteratorB::Layout; + using ElementC = typename Epilogue::OutputTileIterator::Element; + using LayoutC = typename Epilogue::OutputTileIterator::Layout; + static FillMode const kFillModeC = FillModeC_; + + static ComplexTransform const kTransformA = Mma::kTransformA; + static ComplexTransform const kTransformB = Mma::kTransformB; + using Operator = typename Mma::Operator; + + using OperatorClass = typename Mma::Operator::OperatorClass; + using ThreadblockShape = typename Mma::Shape; + using WarpShape = typename Mma::Operator::Shape; + using InstructionShape = typename Mma::Policy::Operator::InstructionShape; + using ArchTag = typename Mma::ArchTag; + + static int const kStages = Mma::kStages; + static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + + /// Warp count (concept: GemmShape) + using WarpCount = typename Mma::WarpCount; + static int const kThreadCount = 32 * WarpCount::kCount; + + /// Split-K preserves splits that are 128b aligned + static int const kSplitKAlignment = 128 / sizeof_bits::value; + + // + // Structures + // + + /// Argument structure + struct Arguments { + + // + // Data members + // + + GemmUniversalMode mode; + GemmCoord problem_size; + int batch_count; + + typename EpilogueOutputOp::Params epilogue; + + void const * ptr_A; + void const * ptr_C; + void * ptr_D; + + int64_t batch_stride_A; + int64_t batch_stride_C; + int64_t batch_stride_D; + + typename LayoutA::Stride::Index lda; + typename LayoutB::Stride::Index ldb; + typename LayoutC::Stride::Index ldc; + typename LayoutC::Stride::Index ldd; + + // + // Methods + // + + Arguments(): + 
mode(GemmUniversalMode::kGemm), + batch_count(1), + ptr_A(nullptr), ptr_C(nullptr), ptr_D(nullptr) { } + + /// constructs an arguments structure + Arguments( + GemmUniversalMode mode, + GemmCoord problem_size, + int batch_count, + typename EpilogueOutputOp::Params epilogue, + void const * ptr_A, + void const * ptr_C, + void * ptr_D, + int64_t batch_stride_A, + int64_t batch_stride_C, + int64_t batch_stride_D, + typename LayoutA::Stride::Index lda, + typename LayoutC::Stride::Index ldc, + typename LayoutC::Stride::Index ldd + ): + mode(mode), + problem_size(problem_size), + batch_count(batch_count), + epilogue(epilogue), + ptr_A(ptr_A), ptr_C(ptr_C), ptr_D(ptr_D), + batch_stride_A(batch_stride_A), batch_stride_C(batch_stride_C), batch_stride_D(batch_stride_D), + lda(lda), ldb(ldb), ldc(ldc), ldd(ldd) { + + } + + }; + + // + // Structure for precomputing values in host memory and passing to kernels + // + + /// Parameters structure + struct Params { + + cutlass::gemm::GemmCoord problem_size; + cutlass::gemm::GemmCoord grid_tiled_shape; + int swizzle_log_tile; + + typename Mma::IteratorA::Params params_A; + typename Mma::IteratorB::Params params_B; + typename Epilogue::OutputTileIterator::Params params_C; + typename Epilogue::OutputTileIterator::Params params_D; + + typename EpilogueOutputOp::Params output_op; + + GemmUniversalMode mode; + int batch_count; + int gemm_k_size; + + void * ptr_A; + void * ptr_B; + void * ptr_C; + void * ptr_D; + + int64_t batch_stride_A; + int64_t batch_stride_B; + int64_t batch_stride_C; + int64_t batch_stride_D; + + int *semaphore; + + // + // Methods + // + + CUTLASS_HOST_DEVICE + Params(): + swizzle_log_tile(0), + params_A(0), + params_B(0), + params_C(0), + params_D(0), + batch_count(0), + gemm_k_size(0), + mode(cutlass::gemm::GemmUniversalMode::kGemm), + ptr_A(nullptr), + ptr_B(nullptr), + ptr_C(nullptr), + ptr_D(nullptr), + batch_stride_A(0), + batch_stride_B(0), + batch_stride_C(0), + batch_stride_D(0), + semaphore(nullptr) { } + + CUTLASS_HOST_DEVICE + Params( + Arguments const &args, + cutlass::gemm::GemmCoord const & grid_tiled_shape, + int gemm_k_size, + void *workspace = nullptr + ): + problem_size(args.problem_size), + grid_tiled_shape(grid_tiled_shape), + swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)), + params_A(args.lda), + params_B(args.lda), + params_C(args.ldc), + params_D(args.ldd), + output_op(args.epilogue), + mode(args.mode), + batch_count(args.batch_count), + gemm_k_size(gemm_k_size), + ptr_A(const_cast(args.ptr_A)), + ptr_B(const_cast(args.ptr_A)), + ptr_C(const_cast(args.ptr_C)), + ptr_D(const_cast(args.ptr_D)), + batch_stride_A(args.batch_stride_A), + batch_stride_B(args.batch_stride_A), + batch_stride_C(args.batch_stride_C), + batch_stride_D(args.batch_stride_D), + semaphore(static_cast(workspace)) { + } + + CUTLASS_HOST_DEVICE + void update( + Arguments const &args, + void *workspace = nullptr) { + + ptr_A = const_cast(args.ptr_A); + ptr_B = const_cast(args.ptr_A); + ptr_C = const_cast(args.ptr_C); + ptr_D = args.ptr_D; + + output_op = args.epilogue; + + semaphore = static_cast(workspace); + } + + }; + + /// Shared memory storage structure + union SharedStorage { + typename Mma::SharedStorage main_loop; + typename Epilogue::SharedStorage epilogue; + }; + +public: + + // + // Methods + // + + CUTLASS_DEVICE + RankKUniversal() { } + + /// Determines whether kernel satisfies alignment + static Status can_implement( + cutlass::gemm::GemmCoord const & problem_size) { + + static int const kAlignmentA = 
Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + + if ((problem_size.m() % kAlignmentA) || (problem_size.k() % kAlignmentA) || + (problem_size.n() % kAlignmentB) || (problem_size.k() % kAlignmentB) || + (problem_size.m() % kAlignmentC) || (problem_size.n() % kAlignmentC)) { + + return Status::kErrorMisalignedOperand; + } + + return Status::kSuccess; + } + + static Status can_implement(Arguments const &args) { + return can_implement(args.problem_size); + } + + /// Executes one GEMM + CUTLASS_DEVICE + void operator()(Params const ¶ms, SharedStorage &shared_storage) { + + // Compute threadblock location + ThreadblockSwizzle threadblock_swizzle; + + cutlass::gemm::GemmCoord threadblock_tile_offset = + threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + // Early exit if CTA is out of range + if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() || + params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { + return; + } + + // Early exit if Fill Mode is Lower and + // if the entire tile is above the main diagonal (bottom-left corner is at or above the diagonal) + if (kFillModeC == cutlass::FillMode::kLower && + (threadblock_tile_offset.m() + 1) * Mma::Shape::kM <= threadblock_tile_offset.n() * Mma::Shape::kN) { + return; + } + + // Early exit if Fill Mode is Upper and + // if the entire tile is below the main diagonal (top-right corner is at or below the diagonal) + if (kFillModeC == cutlass::FillMode::kUpper && + threadblock_tile_offset.m() * Mma::Shape::kM >= (threadblock_tile_offset.n() + 1) * Mma::Shape::kN) { + return; + } + + bool tile_on_diagonal = false; + // Mark tiles that are being crossed by the main diagonal + // (top-right and bottom-left corners are on either side of the diagonal) + if ((threadblock_tile_offset.m() + 1) * Mma::Shape::kM > threadblock_tile_offset.n() * Mma::Shape::kN + && threadblock_tile_offset.m() * Mma::Shape::kM < (threadblock_tile_offset.n() + 1) * Mma::Shape::kN) { + tile_on_diagonal = true; + } + + int offset_k = 0; + int problem_size_k = params.problem_size.k(); + + ElementA *ptr_A = static_cast(params.ptr_A); + ElementB *ptr_B = static_cast(params.ptr_B); + + // + // Fetch pointers based on mode. 
+ // + if (params.mode == GemmUniversalMode::kGemm || + params.mode == GemmUniversalMode::kGemmSplitKParallel) { + + if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) { + + problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size; + } + + offset_k = threadblock_tile_offset.k() * params.gemm_k_size; + } + else if (params.mode == GemmUniversalMode::kBatched) { + ptr_A += threadblock_tile_offset.k() * params.batch_stride_A; + ptr_B += threadblock_tile_offset.k() * params.batch_stride_B; + } + else if (params.mode == GemmUniversalMode::kArray) { + ptr_A = static_cast(params.ptr_A)[threadblock_tile_offset.k()]; + ptr_B = static_cast(params.ptr_B)[threadblock_tile_offset.k()]; + } + + __syncthreads(); + + // Compute initial location in logical coordinates + cutlass::MatrixCoord tb_offset_A{ + threadblock_tile_offset.m() * Mma::Shape::kM, + offset_k, + }; + + cutlass::MatrixCoord tb_offset_B{ + offset_k, + threadblock_tile_offset.n() * Mma::Shape::kN + }; + + + // Compute position within threadblock + int thread_idx = threadIdx.x; + + // Construct iterators to A and B operands + typename Mma::IteratorA iterator_A( + params.params_A, + ptr_A, + {params.problem_size.m(), problem_size_k}, + thread_idx, + tb_offset_A); + + typename Mma::IteratorB iterator_B( + params.params_B, + ptr_B, + {problem_size_k, params.problem_size.n()}, + thread_idx, + tb_offset_B); + + // Broadcast the warp_id computed by lane 0 to ensure dependent code + // is compiled as warp-uniform. + int warp_idx = canonical_warp_idx_sync(); + + int lane_idx = threadIdx.x % 32; + + // + // Main loop + // + + // Construct thread-scoped matrix multiply + Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); + + typename Mma::FragmentC accumulators; + + accumulators.clear(); + + // Compute threadblock-scoped matrix multiply-add + int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK; + + // Compute threadblock-scoped matrix multiply-add + mma( + gemm_k_iterations, + accumulators, + iterator_A, + iterator_B, + accumulators); + + // + // Epilogue + // + + EpilogueOutputOp output_op(params.output_op); + + // + // Masked tile iterators constructed from members + // + + threadblock_tile_offset = + threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + //assume identity swizzle + MatrixCoord threadblock_offset( + threadblock_tile_offset.m() * Mma::Shape::kM, + threadblock_tile_offset.n() * Mma::Shape::kN + ); + + int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m(); + + ElementC *ptr_C = static_cast(params.ptr_C); + ElementC *ptr_D = static_cast(params.ptr_D); + + // + // Fetch pointers based on mode. + // + + // Construct the semaphore. + Semaphore semaphore(params.semaphore + block_idx, thread_idx); + + if (params.mode == GemmUniversalMode::kGemm) { + + // If performing a reduction via split-K, fetch the initial synchronization + if (params.grid_tiled_shape.k() > 1) { + + // Fetch the synchronization lock initially but do not block. 
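+ // Split-K serial reduction note: every threadblock that contributes to this output tile shares
+ // one semaphore slot (params.semaphore + block_idx). Blocks proceed in K-partition order: each
+ // waits for its turn, accumulates its partial result into tensor D, and then releases the lock
+ // for the next partition (see the wait()/release() calls further below).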
+ semaphore.fetch(); + + // Indicate which position in a serial reduction the output operator is currently updating + output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k()); + } + } + else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) { + ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; + } + else if (params.mode == GemmUniversalMode::kBatched) { + ptr_C += threadblock_tile_offset.k() * params.batch_stride_C; + ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; + } + else if (params.mode == GemmUniversalMode::kArray) { + ptr_C = static_cast(params.ptr_C)[threadblock_tile_offset.k()]; + ptr_D = static_cast(params.ptr_D)[threadblock_tile_offset.k()]; + } + + + // If CTA not on diagonal, FillMode doesn't apply. + FillMode kFillModeCTA = tile_on_diagonal ? kFillModeC : FillMode::kNone; + + // Tile iterator loading from source tensor. + typename Epilogue::OutputTileIterator iterator_C( + params.params_C, + ptr_C, + params.problem_size.mn(), + thread_idx, + threadblock_offset, + kFillModeCTA + ); + + // Tile iterator writing to destination tensor. + typename Epilogue::OutputTileIterator iterator_D( + params.params_D, + ptr_D, + params.problem_size.mn(), + thread_idx, + threadblock_offset, + kFillModeCTA + ); + + Epilogue epilogue( + shared_storage.epilogue, + thread_idx, + warp_idx, + lane_idx); + + // Wait on the semaphore - this latency may have been covered by iterator construction + if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { + + // For subsequent threadblocks, the source matrix is held in the 'D' tensor. + if (threadblock_tile_offset.k()) { + iterator_C = iterator_D; + } + + semaphore.wait(threadblock_tile_offset.k()); + + __threadfence(); + } + + // Execute the epilogue operator to update the destination tensor. + epilogue( + output_op, + iterator_D, + accumulators, + iterator_C); + + // + // Release the semaphore + // + + if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { + + int lock = 0; + if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) { + + // The final threadblock resets the semaphore for subsequent grids. + lock = 0; + } + else { + // Otherwise, the semaphore is incremented + lock = threadblock_tile_offset.k() + 1; + } + + semaphore.release(lock); + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/sm70_gemm.hpp b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/sm70_gemm.hpp new file mode 100644 index 0000000000000000000000000000000000000000..ce811a7bbaee26442768fa0ea690c69bd0b5048f --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/sm70_gemm.hpp @@ -0,0 +1,263 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/kernel_hardware_info.hpp" +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/dispatch_policy.hpp" + +#include "cute/tensor.hpp" + +namespace cutlass::gemm::kernel { + +/////////////////////////////////////////////////////////////////////////////// + +template < + class ProblemShape_, + class CollectiveMainloop_, + class CollectiveEpilogue_, + class TileScheduler_ +> +class GemmUniversal< + ProblemShape_, + CollectiveMainloop_, + CollectiveEpilogue_, + TileScheduler_, + cute::enable_if_t>> +{ +public: + // + // Type Aliases + // + using ProblemShape = ProblemShape_; + + static_assert(rank(ProblemShape{}) == 3 or rank(ProblemShape{}) == 4, + "ProblemShape{} should be or "); + + // Mainloop derived types + using CollectiveMainloop = CollectiveMainloop_; + using TileShape = typename CollectiveMainloop::TileShape; + using TiledMma = typename CollectiveMainloop::TiledMma; + using ArchTag = typename CollectiveMainloop::ArchTag; + using ElementA = typename CollectiveMainloop::ElementA; + using StrideA = typename CollectiveMainloop::StrideA; + using ElementB = typename CollectiveMainloop::ElementB; + using StrideB = typename CollectiveMainloop::StrideB; + using DispatchPolicy = typename CollectiveMainloop::DispatchPolicy; + using ElementAccumulator = typename CollectiveMainloop::ElementAccumulator; + using MainloopArguments = typename CollectiveMainloop::Arguments; + using MainloopParams = typename CollectiveMainloop::Params; + + static_assert(cute::is_void_v or cute::is_same_v, + "SM70 kernel does not support specializing the tile scheduler."); + using TileSchedulerTag = TileScheduler_; + using TileScheduler = typename detail::TileSchedulerSelector< + TileScheduler_, ArchTag, TileShape, + cute::Shape, cute::Int<1>, cute::Int<1>>>::Scheduler; + using TileSchedulerArguments = typename TileScheduler::Arguments; + + // Epilogue derived types + using CollectiveEpilogue = CollectiveEpilogue_; + using ElementC = typename CollectiveEpilogue::ElementC; + using StrideC = typename 
CollectiveEpilogue::StrideC; + using ElementD = typename CollectiveEpilogue::ElementD; + using StrideD = typename CollectiveEpilogue::StrideD; + using EpilogueArguments = typename CollectiveEpilogue::Arguments; + using EpilogueParams = typename CollectiveEpilogue::Params; + static_assert(cute::is_same_v, + "Mainloop and epilogue do not agree on accumulator value type."); + + // MSVC requires the cast to fix a warning-as-error. + static constexpr int SharedStorageSize = static_cast(cute::max( + sizeof(typename CollectiveMainloop::SharedStorage), + sizeof(typename CollectiveEpilogue::SharedStorage))); + + static constexpr uint32_t MaxThreadsPerBlock = cute::size(TiledMma{}); + static constexpr uint32_t MinBlocksPerMultiprocessor = 1; + + // Device side arguments + struct Arguments { + GemmUniversalMode mode{}; + ProblemShape problem_shape{}; + MainloopArguments mainloop{}; + EpilogueArguments epilogue{}; + KernelHardwareInfo hw_info{}; + TileSchedulerArguments scheduler{}; + }; + + // Kernel entry point API + struct Params { + GemmUniversalMode mode; + ProblemShape problem_shape; + MainloopParams mainloop; + EpilogueParams epilogue; + }; + + // + // Methods + // + + // Convert to underlying arguments. In this case, a simple copy for the aliased type. + static + Params + to_underlying_arguments(Arguments const& args, void* workspace) { + (void) workspace; + return { + args.mode, + args.problem_shape, + CollectiveMainloop::to_underlying_arguments(args.problem_shape, args.mainloop, workspace), + CollectiveEpilogue::to_underlying_arguments(args.problem_shape, args.epilogue, workspace) + }; + } + + static bool + can_implement(Arguments const& args) { + return args.mode == GemmUniversalMode::kGemm or + (args.mode == GemmUniversalMode::kBatched && rank(ProblemShape{}) == 4); + } + + static int + get_workspace_size(Arguments const& args) { + return 0; + } + + static + cutlass::Status + initialize_workspace(Arguments const& args, void* workspace = nullptr, cudaStream_t stream = nullptr) { + return Status::kSuccess; + } + + static dim3 + get_grid_shape(Params const& params) { + int batch_count = 1; + if constexpr (rank(ProblemShape{}) == 4) { + batch_count = cute::size<3>(params.problem_shape); + } + + return dim3( + cute::size(cute::ceil_div(cute::shape<0>(params.problem_shape), cute::shape<0>(TileShape{}))), + cute::size(cute::ceil_div(cute::shape<1>(params.problem_shape), cute::shape<1>(TileShape{}))), + batch_count + ); + } + + static dim3 + get_block_shape() { + return dim3(MaxThreadsPerBlock, 1, 1); + } + + CUTLASS_DEVICE + void + operator()(Params const& params, char* smem_buf) { + using namespace cute; + using X = Underscore; + + // Preconditions + CUTE_STATIC_ASSERT(is_static::value); + + // Separate out problem shape for convenience + // Optionally append 1s until problem shape is rank-4 in case its is only rank-3 (MNK) + auto problem_shape_MNKL = append<4>(params.problem_shape, Int<1>{}); + auto M = get<0>(problem_shape_MNKL); + auto N = get<1>(problem_shape_MNKL); + auto K = get<2>(problem_shape_MNKL); + auto L = get<3>(problem_shape_MNKL); + + // Preconditions + static_assert(rank(StrideA{}) == 3, "StrideA must be rank-3: [M, K, L]. If batch mode is not needed, set L stride to Int<0>."); + static_assert(rank(StrideB{}) == 3, "StrideB must be rank-3: [N, K, L]. If batch mode is not needed, set L stride to Int<0>."); + static_assert(rank(StrideC{}) == 3, "StrideC must be rank-3: [M, N, L]. 
If batch mode is not needed, set L stride to Int<0>."); + static_assert(rank(StrideD{}) == 3, "StrideD must be rank-3: [M, N, L]. If batch mode is not needed, set L stride to Int<0>."); + + // Get the appropriate blocks for this thread block -- potential for thread block locality + int thread_idx = int(threadIdx.x); + auto blk_shape = TileShape{}; // (BLK_M,BLK_N,BLK_K) + auto [m_coord, n_coord, l_coord] = blockIdx; + auto blk_coord_mnkl = make_coord(m_coord, n_coord, _, l_coord); // (m,n,k,l) + + // Represent the full tensors + Tensor mA_mkl = make_tensor(make_gmem_ptr(params.mainloop.ptr_A), make_shape(M,K,L), params.mainloop.dA); //(m,k,l) + Tensor mB_nkl = make_tensor(make_gmem_ptr(params.mainloop.ptr_B), make_shape(N,K,L), params.mainloop.dB); //(n,k,l) + + // Get batch slice + Tensor mA_mk = mA_mkl(_,_,l_coord); // (m,k) + Tensor mB_nk = mB_nkl(_,_,l_coord); // (n,k) + + // Slice to get the tiles this thread block is responsible for + Tensor gA = local_tile(mA_mk, blk_shape, take<0,3>(blk_coord_mnkl), Step<_1, X,_1>{}); // (BLK_M,BLK_K,k) + Tensor gB = local_tile(mB_nk, blk_shape, take<0,3>(blk_coord_mnkl), Step< X,_1,_1>{}); // (BLK_N,BLK_K,k) + + // Compute tile residues for predication + auto m_max_coord = M - size<0>(gA) * get<0>(blk_coord_mnkl); // M - BLK_M * m_coord + auto n_max_coord = N - size<0>(gB) * get<1>(blk_coord_mnkl); // N - BLK_N * n_coord + auto k_residue = K - size<1>(gA) * size<2>(gA); // K - BLK_K * k_coord_max + auto residue_mnk = make_tuple(m_max_coord, n_max_coord, k_residue); + + // Allocate the tiled_mma and the accumulators for the (M,N) blk_shape + TiledMma tiled_mma; + Tensor accumulators = partition_fragment_C(tiled_mma, take<0,2>(blk_shape)); // (MMA,MMA_M,MMA_N) + clear(accumulators); + + auto k_tile_iter = cute::make_coord_iterator(shape<2>(gA)); + int k_tile_count = size<2>(gA); + + // Perform the collective scoped MMA + CollectiveMainloop collective_mma; + collective_mma( + accumulators, + gA, + gB, + accumulators, + k_tile_iter, k_tile_count, + residue_mnk, + thread_idx, + smem_buf + ); + + // Epilogue and write to gD + CollectiveEpilogue epilogue{params.epilogue}; + epilogue( + problem_shape_MNKL, + blk_shape, + blk_coord_mnkl, + accumulators, + tiled_mma, + residue_mnk, + thread_idx, + smem_buf + ); + } +}; + +/////////////////////////////////////////////////////////////////////////////// + +} // namespace cutlass::gemm::kernel diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/sm90_gemm_tma.hpp b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/sm90_gemm_tma.hpp new file mode 100644 index 0000000000000000000000000000000000000000..8091672f87c429f7b1340301bffaed85ed1b3ef2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/sm90_gemm_tma.hpp @@ -0,0 +1,318 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/fast_math.h" +#include "cutlass/kernel_hardware_info.hpp" +#include "cute/arch/cluster_sm90.hpp" +#include "cutlass/arch/mma_sm90.h" +#include "cutlass/epilogue/collective/detail.hpp" +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/dispatch_policy.hpp" +#include "cutlass/gemm/kernel/sm90_tile_scheduler.hpp" +#include "cutlass/trace.h" + +#include "cute/tensor.hpp" + +/////////////////////////////////////////////////////////////////////////////// + +namespace cutlass::gemm::kernel { + +namespace detail { + +// IF_SWAP_AB::value will be true only if: +// class T has member SwapAB and T::SwapAB is true +template +struct IF_SWAP_AB { static constexpr bool value = false; }; + +template +struct IF_SWAP_AB > +{ static constexpr bool value = T::SwapAB; }; + +} // namespace + +/////////////////////////////////////////////////////////////////////////////// + +template < + class ProblemShape_, + class CollectiveMainloop_, + class CollectiveEpilogue_, + class TileScheduler_ +> +class GemmUniversal< + ProblemShape_, + CollectiveMainloop_, + CollectiveEpilogue_, + TileScheduler_, + cute::enable_if_t>> +{ +public: + // + // Type Aliases + // + using ProblemShape = ProblemShape_; + static_assert(rank(ProblemShape{}) == 3 or rank(ProblemShape{}) == 4, + "ProblemShape{} should be or "); + + // Mainloop derived types + using CollectiveMainloop = CollectiveMainloop_; + using TileShape = typename CollectiveMainloop::TileShape; + using TiledMma = typename CollectiveMainloop::TiledMma; + using ArchTag = typename CollectiveMainloop::ArchTag; + using ElementA = typename CollectiveMainloop::ElementA; + using StrideA = typename CollectiveMainloop::StrideA; + using ElementB = typename CollectiveMainloop::ElementB; + using StrideB = typename CollectiveMainloop::StrideB; + using DispatchPolicy = typename CollectiveMainloop::DispatchPolicy; + using ElementAccumulator = typename CollectiveMainloop::ElementAccumulator; + using ClusterShape = typename DispatchPolicy::ClusterShape; + using MainloopArguments = typename CollectiveMainloop::Arguments; + using MainloopParams = typename CollectiveMainloop::Params; + 
static_assert(ArchTag::kMinComputeCapability >= 90); + + // Epilogue derived types + using CollectiveEpilogue = CollectiveEpilogue_; + using ElementC = typename CollectiveEpilogue::ElementC; + using StrideC = typename CollectiveEpilogue::StrideC; + using ElementD = typename CollectiveEpilogue::ElementD; + using StrideD = typename CollectiveEpilogue::StrideD; + using EpilogueArguments = typename CollectiveEpilogue::Arguments; + using EpilogueParams = typename CollectiveEpilogue::Params; + static_assert(cute::is_same_v, + "Mainloop and epilogue do not agree on accumulator value type."); + + static_assert(cute::is_void_v or cute::is_same_v, + "TMA kernel does not support specializing the tile scheduler."); + using TileSchedulerTag = TileScheduler_; + using TileScheduler = typename detail::TileSchedulerSelector< + TileScheduler_, ArchTag, TileShape, ClusterShape>::Scheduler; + using TileSchedulerArguments = typename TileScheduler::Arguments; + + static constexpr int SharedStorageSize = static_cast(cute::max( + sizeof(typename CollectiveMainloop::SharedStorage), + sizeof(typename CollectiveEpilogue::SharedStorage))); + + static constexpr uint32_t MaxThreadsPerBlock = size(TiledMma{}); + static constexpr uint32_t MinBlocksPerMultiprocessor = 1; + + // Device side arguments + struct Arguments { + GemmUniversalMode mode{}; + ProblemShape problem_shape{}; + MainloopArguments mainloop{}; + EpilogueArguments epilogue{}; + KernelHardwareInfo hw_info{}; + TileSchedulerArguments scheduler{}; + }; + + // Kernel entry point API + struct Params { + GemmUniversalMode mode; + ProblemShape problem_shape; + MainloopParams mainloop; + EpilogueParams epilogue; + }; + + // + // Methods + // + + // Convert to underlying arguments. In this case, a simple copy for the aliased type. 
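+ // When the collective epilogue defines SwapAB (see detail::IF_SWAP_AB), the M and N extents of
+ // the problem shape are exchanged below, so the kernel-level Params describe the transposed
+ // problem while the collectives still receive the caller's original arguments.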
+ static + Params + to_underlying_arguments(Arguments const& args, void* workspace) { + (void) workspace; + auto problem_shape = args.problem_shape; + if constexpr (detail::IF_SWAP_AB::value) { + // swap M/N + get<0>(problem_shape) = get<1>(args.problem_shape); + get<1>(problem_shape) = get<0>(args.problem_shape); + } + return { + args.mode, + problem_shape, + CollectiveMainloop::to_underlying_arguments(args.problem_shape, args.mainloop, workspace), + CollectiveEpilogue::to_underlying_arguments(args.problem_shape, args.epilogue, workspace) + }; + } + + CUTLASS_HOST_DEVICE static + bool + can_implement(Arguments const& args) { + bool implementable = (args.mode == GemmUniversalMode::kGemm) or + (args.mode == GemmUniversalMode::kBatched && rank(ProblemShape{}) == 4); + if (!implementable) { + CUTLASS_TRACE_HOST(" CAN IMPLEMENT: Arguments or Problem Shape don't meet the requirements.\n"); + return implementable; + } + implementable &= CollectiveMainloop::can_implement(args.problem_shape, args.mainloop); + implementable &= CollectiveEpilogue::can_implement(args.problem_shape, args.epilogue); + return implementable; + } + + static int + get_workspace_size(Arguments const& args) { + return 0; + } + + static cutlass::Status + initialize_workspace(Arguments const& args, void* workspace = nullptr, cudaStream_t stream = nullptr) { + return Status::kSuccess; + } + + // Computes the kernel launch grid shape based on runtime parameters + static dim3 + get_grid_shape(Params const& params) { + auto cluster_shape = ClusterShape{}; + auto tile_shape = TileShape{}; + auto problem_shape_MNKL = append<4>(params.problem_shape, Int<1>{}); + return TileScheduler::get_tiled_cta_shape_mnl( + problem_shape_MNKL, tile_shape, cluster_shape); + } + + static dim3 + get_block_shape() { + return dim3(MaxThreadsPerBlock, 1, 1); + } + + CUTLASS_DEVICE + void + operator()(Params const& params, char* smem_buf) { + using namespace cute; + using X = Underscore; + + // Any Tensor Op MMA Atom in the WGMMA ISA is arch conditional to sm90a. + #if ! defined(__CUDA_ARCH_FEAT_SM90_ALL) + if constexpr(size<0>(typename TiledMma::AtomShape_MNK{}) == 64) { + printf("ERROR : Arch conditional MMA instruction used without targeting sm90a compute capability. Aborting.\n"); + return; + } + #endif + + // Preconditions + static_assert(rank(StrideA{}) == 3, "StrideA must be rank-3: [M, K, L]. If batch mode is not needed, set L stride to Int<0>."); + static_assert(rank(StrideB{}) == 3, "StrideB must be rank-3: [N, K, L]. If batch mode is not needed, set L stride to Int<0>."); + static_assert(rank(StrideC{}) == 3, "StrideC must be rank-3: [M, N, L]. If batch mode is not needed, set L stride to Int<0>."); + static_assert(rank(StrideD{}) == 3, "StrideD must be rank-3: [M, N, L]. 
If batch mode is not needed, set L stride to Int<0>."); + + int thread_idx = int(threadIdx.x); + int warp_idx = canonical_warp_idx_sync(); + int lane_predicate = cute::elect_one_sync(); + uint32_t block_rank_in_cluster = cute::block_rank_in_cluster(); + + // Issue Tma Descriptor Prefetch from a single thread + if ((warp_idx == 0) && lane_predicate) { + CollectiveMainloop::prefetch_tma_descriptors(params.mainloop); + } + + // Separate out problem shape for convenience + // Optionally append 1s until problem shape is rank-4 in case its is only rank-3 (MNK) + auto problem_shape_MNKL = append<4>(params.problem_shape, Int<1>{}); + auto M = get<0>(problem_shape_MNKL); + auto N = get<1>(problem_shape_MNKL); + auto K = get<2>(problem_shape_MNKL); + auto L = get<3>(problem_shape_MNKL); + + // TMA requires special handling of strides to deal with coord codomain mapping + // Represent the full tensors -- get these from TMA + Tensor mA_mkl = params.mainloop.tma_load_a.get_tma_tensor(make_shape(M,K,L)); // (m,k,l) + Tensor mB_nkl = params.mainloop.tma_load_b.get_tma_tensor(make_shape(N,K,L)); // (n,k,l) + + // Get the appropriate blocks for this thread block -- potential for thread block locality + auto blk_shape = TileShape{}; // (BLK_M,BLK_N,BLK_K) + auto blk_coord = make_coord(_,_,_); // (m,n,k) -- defer the slice + + // Make tiled views + Tensor gA_mkl = local_tile(mA_mkl, blk_shape, blk_coord, Step<_1, X,_1>{}); // (BLK_M,BLK_K,m,k,l) + Tensor gB_nkl = local_tile(mB_nkl, blk_shape, blk_coord, Step< X,_1,_1>{}); // (BLK_N,BLK_K,n,k,l) + + // Compute m_coord, n_coord, and l_coord with their post-tiled shapes + auto m_coord = idx2crd(int(blockIdx.x), shape<2>(gA_mkl)); + auto n_coord = idx2crd(int(blockIdx.y), shape<2>(gB_nkl)); + auto l_coord = idx2crd(int(blockIdx.z), shape<4>(gB_nkl)); + auto output_tile_coord = make_coord(m_coord, n_coord, _, l_coord); + + // Slice with m_coord and n_coord + Tensor gA = gA_mkl(_,_,m_coord,_,l_coord); // (BLK_M,BLK_K,k) + Tensor gB = gB_nkl(_,_,n_coord,_,l_coord); // (BLK_N,BLK_K,k) + + // Allocate the tiled_mma and the accumulators for the (M,N) blk_shape + TiledMma tiled_mma; + Tensor accumulators = partition_fragment_C(tiled_mma, take<0,2>(blk_shape)); // (MMA,MMA_M,MMA_N) + + auto k_tile_iter = cute::make_coord_iterator(shape<2>(gA)); + auto k_tile_count = size<2>(gA); + + // Perform the collective scoped MMA + CollectiveMainloop collective_mma; + collective_mma( + gA, params.mainloop.tma_load_a, + gB, params.mainloop.tma_load_b, + accumulators, + k_tile_iter, k_tile_count, + thread_idx, + block_rank_in_cluster, + smem_buf, + params.mainloop + ); + + constexpr int BLK_M_RANK = rank<0>(blk_shape); + bool m_oob = int(blockIdx.x) >= size<2>(gA_mkl); + auto m_max_coord = unwrap(cute::transform(make_seq{}, [&](auto i) { + return m_oob ? 0 : get(M) - get<0,i>(blk_shape) * get(m_coord); + })); + + constexpr int BLK_N_RANK = rank<1>(blk_shape); + bool n_oob = int(blockIdx.y) >= size<2>(gB_nkl); + auto n_max_coord = unwrap(cute::transform(make_seq{}, [&](auto i) { + return n_oob ? 
0 : get(N) - get<1,i>(blk_shape) * get(n_coord); + })); + auto residue_mnk = make_tuple(m_max_coord, n_max_coord, Int<0>{}); + + // Epilogue and write to gD + CollectiveEpilogue epilogue{params.epilogue}; + epilogue( + problem_shape_MNKL, + blk_shape, + output_tile_coord, + accumulators, + tiled_mma, + residue_mnk, + thread_idx, + smem_buf + ); + } +}; + +/////////////////////////////////////////////////////////////////////////////// + +} // namespace cutlass::gemm::kernel diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized.hpp b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized.hpp new file mode 100644 index 0000000000000000000000000000000000000000..e5ae25a70cfe1fb8a501194936d498bf6c565450 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized.hpp @@ -0,0 +1,451 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/fast_math.h" +#include "cutlass/kernel_hardware_info.hpp" +#include "cute/arch/cluster_sm90.hpp" +#include "cutlass/arch/reg_reconfig.h" +#include "cutlass/arch/mma_sm90.h" +#include "cutlass/epilogue/collective/detail.hpp" +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/dispatch_policy.hpp" +#include "cutlass/gemm/kernel/sm90_tile_scheduler.hpp" +#include "cutlass/pipeline/pipeline.hpp" +#include "cutlass/trace.h" + +#include "cute/tensor.hpp" + +/////////////////////////////////////////////////////////////////////////////// + +namespace cutlass::gemm::kernel { + +/////////////////////////////////////////////////////////////////////////////// + +template < + class ProblemShape_, + class CollectiveMainloop_, + class CollectiveEpilogue_, + class TileScheduler_ +> +class GemmUniversal< + ProblemShape_, + CollectiveMainloop_, + CollectiveEpilogue_, + TileScheduler_, + cute::enable_if_t>> +{ +public: + // + // Type Aliases + // + using ProblemShape = ProblemShape_; + static_assert(rank(ProblemShape{}) == 3 or rank(ProblemShape{}) == 4, + "ProblemShape{} should be or "); + + // Mainloop derived types + using CollectiveMainloop = CollectiveMainloop_; + using TileShape = typename CollectiveMainloop::TileShape; + using TiledMma = typename CollectiveMainloop::TiledMma; + using ArchTag = typename CollectiveMainloop::ArchTag; + using ElementA = typename CollectiveMainloop::ElementA; + using StrideA = typename CollectiveMainloop::StrideA; + using ElementB = typename CollectiveMainloop::ElementB; + using StrideB = typename CollectiveMainloop::StrideB; + using DispatchPolicy = typename CollectiveMainloop::DispatchPolicy; + using ElementAccumulator = typename CollectiveMainloop::ElementAccumulator; + using ClusterShape = typename DispatchPolicy::ClusterShape; + using MainloopArguments = typename CollectiveMainloop::Arguments; + using MainloopParams = typename CollectiveMainloop::Params; + static_assert(ArchTag::kMinComputeCapability >= 90); + + // Epilogue derived types + using CollectiveEpilogue = CollectiveEpilogue_; + using ElementC = typename CollectiveEpilogue::ElementC; + using StrideC = typename CollectiveEpilogue::StrideC; + using ElementD = typename CollectiveEpilogue::ElementD; + using StrideD = typename CollectiveEpilogue::StrideD; + using EpilogueArguments = typename CollectiveEpilogue::Arguments; + using EpilogueParams = typename CollectiveEpilogue::Params; + + static_assert(cute::is_void_v or cute::is_same_v, + "TMA warp-specialized kernel does not support specializing the tile scheduler."); + using TileSchedulerTag = TileScheduler_; + using TileScheduler = typename detail::TileSchedulerSelector< + TileScheduler_, ArchTag, TileShape, ClusterShape>::Scheduler; + using TileSchedulerArguments = typename TileScheduler::Arguments; + + // Kernel level shared memory storage + struct SharedStorage { + // Mainloop and epilogue don't use smem concurrently since kernel is non-persistent, so we can use a union + union TensorStorage { + using MainloopTensorStorage = typename CollectiveMainloop::TensorStorage; + using EpilogueTensorStorage = typename CollectiveEpilogue::TensorStorage; + + MainloopTensorStorage mainloop; + EpilogueTensorStorage epilogue; + } tensors; + + struct PipelineStorage : cute::aligned_struct<16> { + using MainloopPipelineStorage = typename CollectiveMainloop::PipelineStorage; + using 
EpiLoadPipelineStorage = typename CollectiveEpilogue::PipelineStorage; + + alignas(16) MainloopPipelineStorage mainloop; + alignas(16) EpiLoadPipelineStorage epi_load; + } pipelines; + }; + + static constexpr int SharedStorageSize = sizeof(SharedStorage); + + static constexpr uint32_t NumLoadWarpGroups = 1; + static constexpr uint32_t NumMmaWarpGroups = 1; + static constexpr uint32_t MaxThreadsPerBlock = size(TiledMma{}) + (NumLoadWarpGroups * NumThreadsPerWarpGroup); + static constexpr uint32_t MinBlocksPerMultiprocessor = 1; + + // Device side arguments + struct Arguments { + GemmUniversalMode mode{}; + ProblemShape problem_shape{}; + MainloopArguments mainloop{}; + EpilogueArguments epilogue{}; + KernelHardwareInfo hw_info{}; + TileSchedulerArguments scheduler{}; + }; + + // Kernel entry point API + struct Params { + GemmUniversalMode mode; + ProblemShape problem_shape; + MainloopParams mainloop; + EpilogueParams epilogue; + }; + + // + // Methods + // + + // Convert to underlying arguments. In this case, a simple copy for the aliased type. + static + Params + to_underlying_arguments(Arguments const& args, void* workspace) { + (void) workspace; + auto problem_shape = args.problem_shape; + if constexpr (detail::IF_SWAP_AB::value) { + // swap M/N + get<0>(problem_shape) = get<1>(args.problem_shape); + get<1>(problem_shape) = get<0>(args.problem_shape); + } + return { + args.mode, + problem_shape, + CollectiveMainloop::to_underlying_arguments(args.problem_shape, args.mainloop, workspace), + CollectiveEpilogue::to_underlying_arguments(args.problem_shape, args.epilogue, workspace) + }; + } + + CUTLASS_HOST_DEVICE static + bool + can_implement(Arguments const& args) { + bool implementable = (args.mode == GemmUniversalMode::kGemm) or + (args.mode == GemmUniversalMode::kBatched && rank(ProblemShape{}) == 4); + if (!implementable) { + CUTLASS_TRACE_HOST(" CAN IMPLEMENT: Arguments or Problem Shape don't meet the requirements.\n"); + return implementable; + } + implementable &= CollectiveMainloop::can_implement(args.problem_shape, args.mainloop); + implementable &= CollectiveEpilogue::can_implement(args.problem_shape, args.epilogue); + return implementable; + } + + static + int + get_workspace_size(Arguments const& args) { + return 0; + } + + static + cutlass::Status + initialize_workspace(Arguments const& args, void* workspace = nullptr, cudaStream_t stream = nullptr) { + return Status::kSuccess; + } + + // Computes the kernel launch grid shape based on runtime parameters + static dim3 + get_grid_shape(Params const& params) { + auto cluster_shape = ClusterShape{}; + auto tile_shape = TileShape{}; + auto problem_shape_MNKL = append<4>(params.problem_shape, Int<1>{}); + return TileScheduler::get_tiled_cta_shape_mnl( + problem_shape_MNKL, tile_shape, cluster_shape); + } + + static dim3 + get_block_shape() { + return dim3(MaxThreadsPerBlock, 1, 1); + } + + CUTLASS_DEVICE + void + operator()(Params const& params, char* smem_buf) { + using namespace cute; + using X = Underscore; + + // Any Tensor Op MMA Atom in the WGMMA ISA is arch conditional to sm90a. + #if ! defined(__CUDA_ARCH_FEAT_SM90_ALL) + if constexpr(size<0>(typename TiledMma::AtomShape_MNK{}) == 64) { + printf("ERROR : Arch conditional MMA instruction used without targeting sm90a compute capability. 
Aborting.\n"); + return; + } + #endif + + enum class WarpGroupRole { + Producer = 0, + Consumer = 1, + }; + enum class ProducerWarpRole { + MainloopEpilogue = 0, + Warp1 = 1, + Warp2 = 2, + Warp3 = 3 + }; + + // Kernel level shared memory storage + SharedStorage& shared_storage = *reinterpret_cast(smem_buf); + + int thread_idx = int(threadIdx.x); + int lane_idx = canonical_lane_idx(); + int warp_idx = canonical_warp_idx_sync(); + int warp_idx_in_warp_group = warp_idx % NumWarpsPerWarpGroup; + int warp_group_thread_idx = thread_idx % NumThreadsPerWarpGroup; + auto warp_group_role = WarpGroupRole(canonical_warp_group_idx()); + auto producer_warp_role = ProducerWarpRole(warp_idx_in_warp_group); + int lane_predicate = cute::elect_one_sync(); + uint32_t block_rank_in_cluster = cute::block_rank_in_cluster(); + + + // Issue Tma Descriptor Prefetch from a single thread + if ((warp_idx == 0) && lane_predicate) { + CollectiveMainloop::prefetch_tma_descriptors(params.mainloop); + CollectiveEpilogue::prefetch_tma_descriptors(params.epilogue); + } + + // Mainloop Load pipeline + using MainloopPipeline = typename CollectiveMainloop::MainloopPipeline; + typename MainloopPipeline::Params mainloop_pipeline_params; + if (warp_group_role == WarpGroupRole::Producer && producer_warp_role == ProducerWarpRole::MainloopEpilogue) { + mainloop_pipeline_params.role = MainloopPipeline::ThreadCategory::Producer; + } + if (warp_group_role == WarpGroupRole::Consumer) { + mainloop_pipeline_params.role = MainloopPipeline::ThreadCategory::Consumer; + } + mainloop_pipeline_params.is_leader = warp_group_thread_idx == 0; + mainloop_pipeline_params.num_consumers = NumThreadsPerWarpGroup; + mainloop_pipeline_params.transaction_bytes = CollectiveMainloop::TmaTransactionBytes; + MainloopPipeline mainloop_pipeline(shared_storage.pipelines.mainloop, mainloop_pipeline_params); + + // Epilogue Load pipeline + using EpiLoadPipeline = typename CollectiveEpilogue::LoadPipeline; + typename EpiLoadPipeline::Params epi_load_pipeline_params; + if (warp_group_role == WarpGroupRole::Producer && producer_warp_role == ProducerWarpRole::MainloopEpilogue) { + epi_load_pipeline_params.role = EpiLoadPipeline::ThreadCategory::Producer; + } + if (warp_group_role == WarpGroupRole::Consumer) { + epi_load_pipeline_params.role = EpiLoadPipeline::ThreadCategory::Consumer; + } + epi_load_pipeline_params.dst_blockid = cute::block_rank_in_cluster(); + epi_load_pipeline_params.producer_arv_count = NumThreadsPerWarp; + epi_load_pipeline_params.consumer_arv_count = NumThreadsPerWarpGroup; + epi_load_pipeline_params.transaction_bytes = CollectiveEpilogue::TmaTransactionBytes; + EpiLoadPipeline epi_load_pipeline(shared_storage.pipelines.epi_load, epi_load_pipeline_params); + + // Epilogue Store pipeline + using EpiStorePipeline = typename CollectiveEpilogue::StorePipeline; + typename EpiStorePipeline::Params epi_store_pipeline_params; + epi_store_pipeline_params.always_wait = true; + EpiStorePipeline epi_store_pipeline(epi_store_pipeline_params); + + // Initialize starting pipeline states for the collectives + // Epilogue store pipe is producer-only (consumer is TMA unit, waits via scoreboarding) + typename CollectiveMainloop::PipelineState mainloop_pipe_consumer_state; + typename CollectiveEpilogue::LoadPipelineState epi_load_pipe_consumer_state; + + // For the DMA Load (producer) we start with an opposite phase + // i.e., we skip all waits since we know that the buffer is indeed empty + PipelineState mainloop_pipe_producer_state = 
cutlass::make_producer_start_state(); + PipelineState epi_load_pipe_producer_state = cutlass::make_producer_start_state(); + PipelineState epi_store_pipe_producer_state = cutlass::make_producer_start_state(); + + auto cluster_wait_fn = [&] () { + // We need this to guarantee that the Pipeline init is visible + // To all producers and consumer thread blocks in the Cluster + if constexpr (size(ClusterShape{}) > 1) { + cute::cluster_arrive_relaxed(); + return [] () { cute::cluster_wait(); }; + } + else { + __syncthreads(); + return [] () {}; // do nothing + } + } (); + + // Preconditions + static_assert(rank(StrideA{}) == 3, "StrideA must be rank-3: [M, K, L]. If batch mode is not needed, set L stride to Int<0>."); + static_assert(rank(StrideB{}) == 3, "StrideB must be rank-3: [N, K, L]. If batch mode is not needed, set L stride to Int<0>."); + static_assert(rank(StrideC{}) == 3, "StrideC must be rank-3: [M, N, L]. If batch mode is not needed, set L stride to Int<0>."); + static_assert(rank(StrideD{}) == 3, "StrideD must be rank-3: [M, N, L]. If batch mode is not needed, set L stride to Int<0>."); + + // Separate out problem shape for convenience + // Optionally append 1s until problem shape is rank-4 in case its is only rank-3 (MNK) + auto problem_shape_MNKL = append<4>(params.problem_shape, Int<1>{}); + auto M = get<0>(problem_shape_MNKL); + auto N = get<1>(problem_shape_MNKL); + auto K = get<2>(problem_shape_MNKL); + auto L = get<3>(problem_shape_MNKL); + + // TMA requires special handling of strides to deal with coord codomain mapping + // Represent the full tensors -- get these from TMA + Tensor mA_mkl = params.mainloop.tma_load_a.get_tma_tensor(make_shape(M,K,L)); // (m,k,l) + Tensor mB_nkl = params.mainloop.tma_load_b.get_tma_tensor(make_shape(N,K,L)); // (n,k,l) + + // Get the appropriate blocks for this thread block -- potential for thread block locality + auto blk_shape = TileShape{}; // (BLK_M,BLK_N,BLK_K) + TiledMma tiled_mma; + + // Make tiled views, defer the slice + Tensor gA_mkl = local_tile(mA_mkl, blk_shape, make_coord(_,_,_), Step<_1, X,_1>{}); // (BLK_M,BLK_K,m,k,l) + Tensor gB_nkl = local_tile(mB_nkl, blk_shape, make_coord(_,_,_), Step< X,_1,_1>{}); // (BLK_N,BLK_K,n,k,l) + + // Compute m_coord, n_coord, and l_coord with their post-tiled shapes + auto m_coord = idx2crd(int(blockIdx.x), shape<2>(gA_mkl)); + auto n_coord = idx2crd(int(blockIdx.y), shape<2>(gB_nkl)); + auto l_coord = idx2crd(int(blockIdx.z), shape<4>(gB_nkl)); + auto blk_coord = make_coord(m_coord, n_coord, _, l_coord); + + // Slice with m_coord and n_coord + Tensor gA = gA_mkl(_,_,m_coord,_,l_coord); // (BLK_M,BLK_K,k) + Tensor gB = gB_nkl(_,_,n_coord,_,l_coord); // (BLK_N,BLK_K,k) + + // Get pipeline iterators and increments from tensor shapes + auto k_tile_iter = cute::make_coord_iterator(shape<2>(gA)); + auto k_tile_count = size<2>(gA); + + // Wait for all thread blocks in the Cluster + cluster_wait_fn(); + + // In a warp specialized kernel, collectives expose data movement and compute operations separately + CollectiveMainloop collective_mainloop; + CollectiveEpilogue collective_epilogue(params.epilogue, shared_storage.tensors.epilogue); + + if (warp_group_role == WarpGroupRole::Producer) { + if (producer_warp_role == ProducerWarpRole::MainloopEpilogue) { + collective_mainloop.load( + mainloop_pipeline, + mainloop_pipe_producer_state, + gA, params.mainloop.tma_load_a, + gB, params.mainloop.tma_load_b, + k_tile_iter, k_tile_count, + lane_idx, + block_rank_in_cluster, + shared_storage.tensors.mainloop + 
); + // Update starting mainloop pipeline state for the pipeline drain + mainloop_pipe_producer_state.advance(k_tile_count); + // Make sure mainloop consumer has been waited upon before issuing epilogue load + collective_mainloop.load_tail(mainloop_pipeline, mainloop_pipe_producer_state); + + if (collective_epilogue.is_producer_load_needed()) { + // Ensure warp is converged before issuing epilogue loads + __syncwarp(); + epi_load_pipe_producer_state = + collective_epilogue.load( + epi_load_pipeline, + epi_load_pipe_producer_state, + problem_shape_MNKL, + blk_shape, + blk_coord, + tiled_mma, + lane_idx, + shared_storage.tensors.epilogue + ); + collective_epilogue.load_tail(epi_load_pipeline, epi_load_pipe_producer_state); + } + } + } + else if (warp_group_role == WarpGroupRole::Consumer) { + Tensor accumulators = partition_fragment_C(tiled_mma, take<0,2>(blk_shape)); // (MMA,MMA_M,MMA_N) + + collective_mainloop.mma( + mainloop_pipeline, + mainloop_pipe_consumer_state, + accumulators, + k_tile_count, + thread_idx, + shared_storage.tensors.mainloop, + params.mainloop + ); + + // Make sure the math instructions are done and free buffers before entering the epilogue + collective_mainloop.mma_tail( + mainloop_pipeline, + mainloop_pipe_consumer_state, + k_tile_count + ); + + // Epilogue and write to gD + auto [epi_load_pipe_consumer_state_next, epi_store_pipe_producer_state_next] = + collective_epilogue.store( + epi_load_pipeline, + epi_load_pipe_consumer_state, + epi_store_pipeline, + epi_store_pipe_producer_state, + problem_shape_MNKL, + blk_shape, + blk_coord, + accumulators, + tiled_mma, + warp_group_thread_idx, + shared_storage.tensors.epilogue + ); + + collective_epilogue.store_tail( + epi_load_pipeline, + epi_load_pipe_consumer_state_next, + epi_store_pipeline, + epi_store_pipe_producer_state_next + ); + } + } +}; + +/////////////////////////////////////////////////////////////////////////////// + +} // namespace cutlass::gemm::kernel diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized_cooperative.hpp b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized_cooperative.hpp new file mode 100644 index 0000000000000000000000000000000000000000..7ad54f4afc3398b47b723fb1f0132984d95c39a3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized_cooperative.hpp @@ -0,0 +1,590 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/fast_math.h" +#include "cutlass/kernel_hardware_info.hpp" +#include "cute/arch/cluster_sm90.hpp" +#include "cutlass/arch/reg_reconfig.h" +#include "cutlass/arch/mma_sm90.h" +#include "cutlass/epilogue/collective/detail.hpp" +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/dispatch_policy.hpp" +#include "cutlass/gemm/kernel/tile_scheduler.hpp" +#include "cutlass/pipeline/pipeline.hpp" +#include "cute/tensor.hpp" +#include "cutlass/trace.h" + +/////////////////////////////////////////////////////////////////////////////// + +namespace cutlass::gemm::kernel { + +/////////////////////////////////////////////////////////////////////////////// + +template < + class ProblemShape_, + class CollectiveMainloop_, + class CollectiveEpilogue_, + class TileScheduler_ +> +class GemmUniversal< + ProblemShape_, + CollectiveMainloop_, + CollectiveEpilogue_, + TileScheduler_, + cute::enable_if_t>> +{ +public: + // + // Type Aliases + // + using ProblemShape = ProblemShape_; + static_assert(rank(ProblemShape{}) == 3 or rank(ProblemShape{}) == 4, + "ProblemShape{} should be or "); + + // Mainloop derived types + using CollectiveMainloop = CollectiveMainloop_; + using TileShape = typename CollectiveMainloop::TileShape; + using TiledMma = typename CollectiveMainloop::TiledMma; + using ArchTag = typename CollectiveMainloop::ArchTag; + using ElementA = typename CollectiveMainloop::ElementA; + using StrideA = typename CollectiveMainloop::StrideA; + using ElementB = typename CollectiveMainloop::ElementB; + using StrideB = typename CollectiveMainloop::StrideB; + using DispatchPolicy = typename CollectiveMainloop::DispatchPolicy; + using ElementAccumulator = typename CollectiveMainloop::ElementAccumulator; + using ClusterShape = typename DispatchPolicy::ClusterShape; + using MainloopArguments = typename CollectiveMainloop::Arguments; + using MainloopParams = typename CollectiveMainloop::Params; + + // Epilogue derived types + using CollectiveEpilogue = CollectiveEpilogue_; + using ElementC = typename CollectiveEpilogue::ElementC; + using StrideC = typename CollectiveEpilogue::StrideC; + using ElementD = typename CollectiveEpilogue::ElementD; + using StrideD = typename CollectiveEpilogue::StrideD; + using EpilogueArguments = typename CollectiveEpilogue::Arguments; + using EpilogueParams = typename CollectiveEpilogue::Params; + + static_assert(ArchTag::kMinComputeCapability >= 90); + + using TileSchedulerTag = TileScheduler_; + using TileScheduler = typename detail::TileSchedulerSelector< + TileScheduler_, ArchTag, TileShape, ClusterShape>::Scheduler; + using 
TileSchedulerArguments = typename TileScheduler::Arguments; + using TileSchedulerParams = typename TileScheduler::Params; + + static constexpr uint32_t NumLoadWarpGroups = 1; + static constexpr uint32_t NumMmaWarpGroups = size(TiledMma{}) / NumThreadsPerWarpGroup; + static constexpr uint32_t MaxThreadsPerBlock = size(TiledMma{}) + (NumLoadWarpGroups * NumThreadsPerWarpGroup); + static constexpr uint32_t MinBlocksPerMultiprocessor = 1; + + /// Register requirement for Load and Math WGs + static constexpr uint32_t LoadRegisterRequirement = 40; + static constexpr uint32_t MmaRegisterRequirement = 232; + + // 1 stage ordered sequence between mainloop and epilogue producer load threads + using LoadWarpOrderBarrier = cutlass::OrderedSequenceBarrier<1,2>; + + // Kernel level shared memory storage + struct SharedStorage { + struct TensorStorage : cute::aligned_struct<128> { + using MainloopTensorStorage = typename CollectiveMainloop::TensorStorage; + using EpilogueTensorStorage = typename CollectiveEpilogue::TensorStorage; + + MainloopTensorStorage mainloop; + EpilogueTensorStorage epilogue; + } tensors; + + struct PipelineStorage : cute::aligned_struct<16> { + using MainloopPipelineStorage = typename CollectiveMainloop::PipelineStorage; + using EpiLoadPipelineStorage = typename CollectiveEpilogue::PipelineStorage; + + alignas(16) MainloopPipelineStorage mainloop; + alignas(16) EpiLoadPipelineStorage epi_load; + alignas(16) typename LoadWarpOrderBarrier::SharedStorage load_order; + } pipelines; + }; + + static constexpr int SharedStorageSize = sizeof(SharedStorage); + + // Device side arguments + struct Arguments { + GemmUniversalMode mode{}; + ProblemShape problem_shape{}; + MainloopArguments mainloop{}; + EpilogueArguments epilogue{}; + KernelHardwareInfo hw_info{}; + TileSchedulerArguments scheduler{}; + }; + + // Kernel entry point API + struct Params { + GemmUniversalMode mode; + ProblemShape problem_shape; + MainloopParams mainloop; + EpilogueParams epilogue; + KernelHardwareInfo hw_info; + TileSchedulerParams scheduler; + void* workspace; + }; + + // + // Methods + // + + // Convert to underlying arguments. In this case, a simple copy for the aliased type. 
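+
+  // A host-side usage sketch (illustrative only, not part of the upstream header): the
+  // Arguments aggregate above is brace-initialized on the host and lowered to Params
+  // through to_underlying_arguments() before launch. `GemmKernel`, `mainloop_args`,
+  // `epilogue_args`, `scheduler_args`, and `workspace_ptr` are placeholder names for the
+  // concrete kernel instantiation, collective arguments, and workspace allocation.
+  //
+  //   typename GemmKernel::Arguments args{
+  //     cutlass::gemm::GemmUniversalMode::kGemm,
+  //     cute::make_shape(M, N, K, L),          // rank-3 (MNK) or rank-4 (MNKL) problem shape
+  //     mainloop_args,                         // CollectiveMainloop::Arguments
+  //     epilogue_args,                         // CollectiveEpilogue::Arguments
+  //     cutlass::KernelHardwareInfo{device_id, sm_count},
+  //     scheduler_args                         // TileScheduler::Arguments
+  //   };
+  //   auto params = GemmKernel::to_underlying_arguments(args, workspace_ptr);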
+ static + Params + to_underlying_arguments(Arguments const& args, void* workspace) { + CUTLASS_TRACE_HOST("to_underlying_arguments():"); + + auto problem_shape = args.problem_shape; + if constexpr (detail::IF_SWAP_AB::value) { + // swap M/N + get<0>(problem_shape) = get<1>(args.problem_shape); + get<1>(problem_shape) = get<0>(args.problem_shape); + } + auto problem_shape_MNKL = append<4>(problem_shape, 1); + + // Get SM count if needed, otherwise use user supplied SM count + int sm_count = args.hw_info.sm_count; + if (sm_count <= 0) { + CUTLASS_TRACE_HOST(" WARNING: Arguments do not include a valid SM count.\n" + " For optimal performance, populate the arguments KernelHardwareInfo struct with the SM count."); + sm_count = KernelHardwareInfo::query_device_multiprocessor_count(args.hw_info.device_id); + } + + CUTLASS_TRACE_HOST("to_underlying_arguments(): Setting persistent grid SM count to " << sm_count); + + KernelHardwareInfo hw_info{args.hw_info.device_id, sm_count}; + TileSchedulerParams scheduler = TileScheduler::to_underlying_arguments( + problem_shape_MNKL, TileShape{}, ClusterShape{}, hw_info, args.scheduler, workspace); + + return { + args.mode, + problem_shape, + CollectiveMainloop::to_underlying_arguments(args.problem_shape, args.mainloop, workspace), + CollectiveEpilogue::to_underlying_arguments(args.problem_shape, args.epilogue, workspace), + hw_info, + scheduler, + workspace + }; + } + + CUTLASS_HOST_DEVICE static + bool + can_implement(Arguments const& args) { + bool implementable = (args.mode == GemmUniversalMode::kGemm) or + (args.mode == GemmUniversalMode::kBatched && rank(ProblemShape{}) == 4); + if (!implementable) { + CUTLASS_TRACE_HOST(" CAN IMPLEMENT: Arguments or Problem Shape don't meet the requirements.\n"); + return implementable; + } + implementable &= CollectiveMainloop::can_implement(args.problem_shape, args.mainloop); + implementable &= CollectiveEpilogue::can_implement(args.problem_shape, args.epilogue); + return implementable; + } + + static int + get_workspace_size(Arguments const& args) { + TileScheduler t; + return t.template get_workspace_size( + args.scheduler, args.problem_shape, args.hw_info, NumMmaWarpGroups); + } + + static + cutlass::Status + initialize_workspace(Arguments const& args, void* workspace = nullptr, cudaStream_t stream = nullptr) { + TileScheduler t; + return t.template initialize_workspace( + args.scheduler, workspace, stream, args.problem_shape, args.hw_info, NumMmaWarpGroups); + } + + // Computes the kernel launch grid shape based on runtime parameters + static dim3 + get_grid_shape(Params const& params) { + // Given device SM count, set grid size s.t. we do not launch more thread blocks than we can run concurrently + TileSchedulerArguments args{}; + if constexpr (!std::is_const_v) { + args.max_swizzle_size = 1 << params.scheduler.log_swizzle_size_; + } + args.raster_order = params.scheduler.raster_order_ == TileScheduler::RasterOrder::AlongN ? TileScheduler::RasterOrderOptions::AlongN : TileScheduler::RasterOrderOptions::AlongM; + return TileScheduler::get_grid_shape(params.problem_shape, TileShape{}, ClusterShape{}, params.hw_info, args); + } + + static dim3 + get_block_shape() { + return dim3(MaxThreadsPerBlock, 1, 1); + } + + CUTLASS_DEVICE + void + operator()(Params const& params, char* smem_buf) { + using namespace cute; + using X = Underscore; + + // Any Tensor Op MMA Atom in the WGMMA ISA is arch conditional to sm90a. + #if ! 
defined(__CUDA_ARCH_FEAT_SM90_ALL) + if constexpr(size<0>(typename TiledMma::AtomShape_MNK{}) == 64) { + printf("ERROR : Arch conditional MMA instruction used without targeting sm90a compute capability. Aborting.\n"); + return; + } + #endif + + // Preconditions + static_assert(size(TiledMma{}) == 256, "Cooperative kernel must have TiledMMA operating using 256 threads."); + static_assert(size<0>(TileShape{}) >= 128, + "Cooperative kernel requires Tile Size to be greater than or equal to 128 along the M-dimension."); + + static_assert(rank(StrideA{}) == 3, "StrideA must be rank-3: [M, K, L]. If batch mode is not needed, set L stride to Int<0>."); + static_assert(rank(StrideB{}) == 3, "StrideB must be rank-3: [N, K, L]. If batch mode is not needed, set L stride to Int<0>."); + static_assert(rank(StrideC{}) == 3, "StrideC must be rank-3: [M, N, L]. If batch mode is not needed, set L stride to Int<0>."); + static_assert(rank(StrideD{}) == 3, "StrideD must be rank-3: [M, N, L]. If batch mode is not needed, set L stride to Int<0>."); + + /* In the Cooperative kernel, Consumer0 and Consumer1 collaborate on the same tile */ + enum class WarpGroupRole { + Producer = 0, + Consumer0 = 1, + Consumer1 = 2 + }; + enum class ProducerWarpRole { + Mainloop = 0, + Warp1 = 1, + Epilogue = 2, + Warp3 = 3 + }; + + // Kernel level shared memory storage + SharedStorage& shared_storage = *reinterpret_cast(smem_buf); + + int thread_idx = int(threadIdx.x); + int lane_idx = canonical_lane_idx(); + int warp_idx = canonical_warp_idx_sync(); + int warp_idx_in_warp_group = warp_idx % NumWarpsPerWarpGroup; + int warp_group_thread_idx = thread_idx % NumThreadsPerWarpGroup; + int mma_thread_idx = thread_idx % size(TiledMma{}); + auto warp_group_role = WarpGroupRole(canonical_warp_group_idx()); + auto producer_warp_role = ProducerWarpRole(warp_idx_in_warp_group); + int lane_predicate = cute::elect_one_sync(); + uint32_t block_rank_in_cluster = cute::block_rank_in_cluster(); + + // Issue Tma Descriptor Prefetch from a single thread + if ((warp_idx == 0) && lane_predicate) { + CollectiveMainloop::prefetch_tma_descriptors(params.mainloop); + CollectiveEpilogue::prefetch_tma_descriptors(params.epilogue); + } + + // Mainloop Load pipeline + using MainloopPipeline = typename CollectiveMainloop::MainloopPipeline; + typename MainloopPipeline::Params mainloop_pipeline_params; + if (warp_group_role == WarpGroupRole::Producer && producer_warp_role == ProducerWarpRole::Mainloop) { + mainloop_pipeline_params.role = MainloopPipeline::ThreadCategory::Producer; + } + if (warp_group_role == WarpGroupRole::Consumer0 || warp_group_role == WarpGroupRole::Consumer1) { + mainloop_pipeline_params.role = MainloopPipeline::ThreadCategory::Consumer; + } + mainloop_pipeline_params.is_leader = warp_group_thread_idx == 0; + mainloop_pipeline_params.num_consumers = size(TiledMma{}); + mainloop_pipeline_params.transaction_bytes = CollectiveMainloop::TmaTransactionBytes; + MainloopPipeline mainloop_pipeline(shared_storage.pipelines.mainloop, mainloop_pipeline_params); + + // Epilogue Load pipeline + using EpiLoadPipeline = typename CollectiveEpilogue::LoadPipeline; + typename EpiLoadPipeline::Params epi_load_pipeline_params; + if (warp_group_role == WarpGroupRole::Producer && producer_warp_role == ProducerWarpRole::Epilogue) { + epi_load_pipeline_params.role = EpiLoadPipeline::ThreadCategory::Producer; + } + if (warp_group_role == WarpGroupRole::Consumer0 || warp_group_role == WarpGroupRole::Consumer1) { + epi_load_pipeline_params.role = 
EpiLoadPipeline::ThreadCategory::Consumer; + } + epi_load_pipeline_params.dst_blockid = cute::block_rank_in_cluster(); + epi_load_pipeline_params.producer_arv_count = NumThreadsPerWarp; + epi_load_pipeline_params.consumer_arv_count = size(TiledMma{}); + epi_load_pipeline_params.transaction_bytes = CollectiveEpilogue::TmaTransactionBytes; + EpiLoadPipeline epi_load_pipeline(shared_storage.pipelines.epi_load, epi_load_pipeline_params); + + // Epilogue Store pipeline + using EpiStorePipeline = typename CollectiveEpilogue::StorePipeline; + typename EpiStorePipeline::Params epi_store_pipeline_params; + epi_store_pipeline_params.always_wait = true; + EpiStorePipeline epi_store_pipeline(epi_store_pipeline_params); + + typename LoadWarpOrderBarrier::Params params_load_order_barrier; + params_load_order_barrier.group_id = producer_warp_role == ProducerWarpRole::Mainloop ? 0 : 1; + params_load_order_barrier.group_size = NumThreadsPerWarp; + LoadWarpOrderBarrier load_order_barrier(shared_storage.pipelines.load_order, params_load_order_barrier); + + // Initialize starting pipeline states for the collectives + // Epilogue store pipe is producer-only (consumer is TMA unit, waits via scoreboarding) + typename CollectiveMainloop::PipelineState mainloop_pipe_consumer_state; + typename CollectiveEpilogue::LoadPipelineState epi_load_pipe_consumer_state; + + // For the DMA Load (producer) we start with an opposite phase + // i.e., we skip all waits since we know that the buffer is indeed empty + PipelineState mainloop_pipe_producer_state = cutlass::make_producer_start_state(); + PipelineState epi_load_pipe_producer_state = cutlass::make_producer_start_state(); + PipelineState epi_store_pipe_producer_state = cutlass::make_producer_start_state(); + + auto cluster_wait_fn = [&] () { + // We need this to guarantee that the Pipeline init is visible + // To all producers and consumer thread blocks in the Cluster + if constexpr (size(ClusterShape{}) > 1) { + cute::cluster_arrive_relaxed(); + return [] () { cute::cluster_wait(); }; + } + else { + __syncthreads(); + return [] () {}; // do nothing + } + } (); + + // Separate out problem shape for convenience + // Optionally append 1s until problem shape is rank-4 in case its is only rank-3 (MNK) + auto problem_shape_MNKL = append<4>(params.problem_shape, Int<1>{}); + auto M = get<0>(problem_shape_MNKL); + auto N = get<1>(problem_shape_MNKL); + auto K = get<2>(problem_shape_MNKL); + auto L = get<3>(problem_shape_MNKL); + + // TMA requires special handling of strides to deal with coord codomain mapping + // Represent the full tensors -- get these from TMA + Tensor mA_mkl = params.mainloop.tma_load_a.get_tma_tensor(make_shape(M,K,L)); // (m,k,l) + Tensor mB_nkl = params.mainloop.tma_load_b.get_tma_tensor(make_shape(N,K,L)); // (n,k,l) + + // Get the appropriate blocks for this thread block -- potential for thread block locality + TiledMma tiled_mma; + auto blk_shape = TileShape{}; // (BLK_M,BLK_N,BLK_K) + + // Make tiled views, defer the slice + Tensor gA_mkl = local_tile(mA_mkl, blk_shape, make_coord(_,_,_), Step<_1, X,_1>{}); // (BLK_M,BLK_K,m,k,l) + Tensor gB_nkl = local_tile(mB_nkl, blk_shape, make_coord(_,_,_), Step< X,_1,_1>{}); // (BLK_N,BLK_K,n,k,l) + + // Get pipeline stage increments from tensor shapes + auto k_tile_count = size<3>(gA_mkl); + + TileScheduler scheduler{params.scheduler}; + auto work_tile_info = scheduler.get_current_work(); + + // In a warp specialized kernel, collectives expose data movement and compute operations separately + CollectiveMainloop 
collective_mainloop; + CollectiveEpilogue collective_epilogue(params.epilogue, shared_storage.tensors.epilogue); + + // Wait for all thread blocks in the Cluster + cluster_wait_fn(); + + if (warp_group_role == WarpGroupRole::Producer) { + cutlass::arch::warpgroup_reg_dealloc(); + + // Mainloop Producer Warp + if (producer_warp_role == ProducerWarpRole::Mainloop) { + bool do_load_order_arrive = true; + while (work_tile_info.is_valid_tile) { + // Compute m_coord, n_coord, l_coord with the post-tiled m-shape and n-shape + auto m_coord = idx2crd(work_tile_info.M_idx, shape<2>(gA_mkl)); + auto n_coord = idx2crd(work_tile_info.N_idx, shape<2>(gB_nkl)); + auto l_coord = idx2crd(work_tile_info.L_idx, shape<4>(gB_nkl)); + auto blk_coord = make_coord(m_coord, n_coord, _, l_coord); + + // Slice with our work tile coordinates to construct mainloop tensor views + Tensor gA = gA_mkl(_,_,m_coord,_,l_coord); // (BLK_M,BLK_K,k) + Tensor gB = gB_nkl(_,_,n_coord,_,l_coord); // (BLK_N,BLK_K,k) + + // Get the number of K tiles to compute for this work as well as the starting K tile offset of the work. + auto work_k_tile_count = TileScheduler::get_work_k_tile_count(work_tile_info, problem_shape_MNKL, blk_shape); + auto work_k_tile_start = TileScheduler::get_work_k_tile_start(work_tile_info); + auto k_tile_iter = cute::make_coord_iterator(idx2crd(work_k_tile_start, shape<2>(gA)), shape<2>(gA)); + + collective_mainloop.load( + mainloop_pipeline, + mainloop_pipe_producer_state, + gA, params.mainloop.tma_load_a, + gB, params.mainloop.tma_load_b, + k_tile_iter, work_k_tile_count, + lane_idx, + block_rank_in_cluster, + shared_storage.tensors.mainloop + ); + // Update starting pipeline state for the next tile + mainloop_pipe_producer_state.advance(work_k_tile_count); + + // Signal for the epilogue load warp to begin + if (do_load_order_arrive) { + load_order_barrier.arrive(); + do_load_order_arrive = false; + } + + // Get next work tile + work_tile_info = fetch_next_work(work_tile_info, scheduler); + } // Scheduler work fetch loop + + // Make sure all Consumer Warp Groups have been waited upon + collective_mainloop.load_tail(mainloop_pipeline, mainloop_pipe_producer_state); + } // Mainloop Producer Warp End + + // Epilogue Producer Warp + else if (producer_warp_role == ProducerWarpRole::Epilogue && collective_epilogue.is_producer_load_needed()) { + load_order_barrier.wait(); + while (work_tile_info.is_valid_tile) { + if (TileScheduler::compute_epilogue(work_tile_info)) { + // Compute m_coord, n_coord, l_coord with the post-tiled m-shape and n-shape + auto m_coord = idx2crd(work_tile_info.M_idx, shape<2>(gA_mkl)); + auto n_coord = idx2crd(work_tile_info.N_idx, shape<2>(gB_nkl)); + auto l_coord = idx2crd(work_tile_info.L_idx, shape<4>(gB_nkl)); + auto blk_coord = make_coord(m_coord, n_coord, _, l_coord); + + epi_load_pipe_producer_state = + collective_epilogue.load( + epi_load_pipeline, + epi_load_pipe_producer_state, + problem_shape_MNKL, + blk_shape, + blk_coord, + tiled_mma, + lane_idx, + shared_storage.tensors.epilogue + ); + } + + // Get next work tile + work_tile_info = fetch_next_work(work_tile_info, scheduler); + } // Scheduler work fetch loop + + // Make sure all Consumer Warp Groups have been waited upon + collective_epilogue.load_tail(epi_load_pipeline, epi_load_pipe_producer_state); + } // Epilogue Producer Warp End + } // Producer Warp Group End + + else if (warp_group_role == WarpGroupRole::Consumer0 || warp_group_role == WarpGroupRole::Consumer1) { + cutlass::arch::warpgroup_reg_alloc(); + + // Do we 
potentially issue tail arrives for TMA stores, if epilogue load is waiting for it + bool do_store_tail = false; + while (work_tile_info.is_valid_tile) { + // Compute m_coord, n_coord, l_coord with the post-tiled m-shape and n-shape + auto m_coord = idx2crd(work_tile_info.M_idx, shape<2>(gA_mkl)); + auto n_coord = idx2crd(work_tile_info.N_idx, shape<2>(gB_nkl)); + auto l_coord = idx2crd(work_tile_info.L_idx, shape<4>(gB_nkl)); + auto blk_coord = make_coord(m_coord, n_coord, _, l_coord); + auto work_k_tile_count = TileScheduler::get_work_k_tile_count(work_tile_info, problem_shape_MNKL, blk_shape); + + // Allocate the the accumulators for the (M,N) blk_shape + // + // MSVC CTAD breaks if we say "Tensor" here, so we use "auto" instead. + auto accumulators = partition_fragment_C(tiled_mma, take<0,2>(blk_shape)); // (MMA,MMA_M,MMA_N) + + collective_mainloop.mma( + mainloop_pipeline, + mainloop_pipe_consumer_state, + accumulators, + work_k_tile_count, + mma_thread_idx, + shared_storage.tensors.mainloop, + params.mainloop + ); + + // Make sure the math instructions are done and free buffers before entering the epilogue + collective_mainloop.mma_tail( + mainloop_pipeline, + mainloop_pipe_consumer_state, + work_k_tile_count + ); + + // Update starting mainloop pipeline state for the next tile + mainloop_pipe_consumer_state.advance(work_k_tile_count); + + // Index of warp group within consumer warp groups + int consumer_warp_group_idx = canonical_warp_group_idx() - NumLoadWarpGroups; + + // Perform reduction across splits, if needed + TileScheduler::fixup( + params.scheduler, work_tile_info, accumulators, NumMmaWarpGroups, consumer_warp_group_idx); + + if (TileScheduler::compute_epilogue(work_tile_info)) { + // Epilogue and write to gD + auto [epi_load_pipe_consumer_state_next, epi_store_pipe_producer_state_next] = + collective_epilogue.store( + epi_load_pipeline, + epi_load_pipe_consumer_state, + epi_store_pipeline, + epi_store_pipe_producer_state, + problem_shape_MNKL, + blk_shape, + blk_coord, + accumulators, + tiled_mma, + mma_thread_idx, + shared_storage.tensors.epilogue + ); + epi_load_pipe_consumer_state = epi_load_pipe_consumer_state_next; + epi_store_pipe_producer_state = epi_store_pipe_producer_state_next; + do_store_tail = true; + } + + // Get next work tile + work_tile_info = fetch_next_work(work_tile_info, scheduler); + } // Scheduler work fetch loop + + if (do_store_tail) { + collective_epilogue.store_tail( + epi_load_pipeline, + epi_load_pipe_consumer_state, + epi_store_pipeline, + epi_store_pipe_producer_state + ); + } + } // Consumer Warp Groups End + } + +private: + // Kernel helper function to get next work unit + CUTLASS_DEVICE + typename TileScheduler::WorkTileInfo + fetch_next_work( + typename TileScheduler::WorkTileInfo& work_tile_info, + TileScheduler& scheduler) const { + // Check whether we should continue on with the current work unit. If this is the case, + // the work unit will have been updated in continue_current_work to reflect the new + // tile to be computed. 
+ if (scheduler.continue_current_work(work_tile_info)) { + return work_tile_info; + } + + // Get next work tile + scheduler.advance_to_next_work(); + return scheduler.get_current_work(); + } +}; + +/////////////////////////////////////////////////////////////////////////////// + +} // namespace cutlass::gemm::kernel diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized_pingpong.hpp b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized_pingpong.hpp new file mode 100644 index 0000000000000000000000000000000000000000..dd1f5a6b0ca62d9efc755850c284934ffd731b7e --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized_pingpong.hpp @@ -0,0 +1,585 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/kernel_hardware_info.hpp" +#include "cutlass/fast_math.h" +#include "cute/arch/cluster_sm90.hpp" +#include "cutlass/arch/reg_reconfig.h" +#include "cutlass/arch/mma_sm90.h" +#include "cutlass/epilogue/collective/detail.hpp" +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/dispatch_policy.hpp" +#include "cutlass/gemm/kernel/sm90_tile_scheduler.hpp" +#include "cutlass/pipeline/pipeline.hpp" +#include "cutlass/trace.h" + +#include "cute/tensor.hpp" + +/////////////////////////////////////////////////////////////////////////////// + +namespace cutlass::gemm::kernel { + +/////////////////////////////////////////////////////////////////////////////// + +template < + class ProblemShape_, + class CollectiveMainloop_, + class CollectiveEpilogue_, + class TileScheduler_ +> +class GemmUniversal< + ProblemShape_, + CollectiveMainloop_, + CollectiveEpilogue_, + TileScheduler_, + cute::enable_if_t>> +{ +public: + // + // Type Aliases + // + using ProblemShape = ProblemShape_; + static_assert(rank(ProblemShape{}) == 3 or rank(ProblemShape{}) == 4, + "ProblemShape{} should be or "); + + // Mainloop derived types + using CollectiveMainloop = CollectiveMainloop_; + using TileShape = typename CollectiveMainloop::TileShape; + using TiledMma = typename CollectiveMainloop::TiledMma; + using ArchTag = typename CollectiveMainloop::ArchTag; + using ElementA = typename CollectiveMainloop::ElementA; + using StrideA = typename CollectiveMainloop::StrideA; + using ElementB = typename CollectiveMainloop::ElementB; + using StrideB = typename CollectiveMainloop::StrideB; + using DispatchPolicy = typename CollectiveMainloop::DispatchPolicy; + using ElementAccumulator = typename CollectiveMainloop::ElementAccumulator; + using ClusterShape = typename DispatchPolicy::ClusterShape; + using MainloopArguments = typename CollectiveMainloop::Arguments; + using MainloopParams = typename CollectiveMainloop::Params; + static_assert(ArchTag::kMinComputeCapability >= 90); + + // Epilogue derived types + using CollectiveEpilogue = CollectiveEpilogue_; + using ElementC = typename CollectiveEpilogue::ElementC; + using StrideC = typename CollectiveEpilogue::StrideC; + using ElementD = typename CollectiveEpilogue::ElementD; + using StrideD = typename CollectiveEpilogue::StrideD; + using EpilogueArguments = typename CollectiveEpilogue::Arguments; + using EpilogueParams = typename CollectiveEpilogue::Params; + + static_assert(cute::is_void_v or cute::is_same_v, + "Ping-pong kernel only supports the default scheduler."); + using TileSchedulerTag = TileScheduler_; + using TileScheduler = typename detail::TileSchedulerSelector< + TileScheduler_, ArchTag, TileShape, ClusterShape>::Scheduler; + using TileSchedulerArguments = typename TileScheduler::Arguments; + using TileSchedulerParams = typename TileScheduler::Params; + + static constexpr uint32_t NumLoadWarpGroups = 1; + static constexpr uint32_t NumMmaWarpGroups = 2; + static constexpr uint32_t MaxThreadsPerBlock = size(TiledMma{}) + (NumMmaWarpGroups * NumThreadsPerWarpGroup); + static constexpr uint32_t MinBlocksPerMultiprocessor = 1; + + /// Register requirement for Load and Math WGs + static constexpr uint32_t LoadRegisterRequirement = 40; + static constexpr uint32_t MmaRegisterRequirement = 232; + + // 1 stage ordered sequence between mainloop and epilogue producer load threads + using LoadWarpOrderBarrier = 
cutlass::OrderedSequenceBarrier<1,2>; + + // Order Sequence barrier with two stages: one for Mainloop and one for Epilogue + static constexpr uint32_t StagesPerMathWarpGroup = 2; + using MathWarpGroupOrderBarrier = cutlass::OrderedSequenceBarrier< + StagesPerMathWarpGroup, NumMmaWarpGroups>; + + // Kernel level shared memory storage + struct SharedStorage { + struct TensorStorage : cute::aligned_struct<128> { + using MainloopTensorStorage = typename CollectiveMainloop::TensorStorage; + using EpilogueTensorStorage = typename CollectiveEpilogue::TensorStorage; + + MainloopTensorStorage mainloop; + EpilogueTensorStorage epilogue; + } tensors; + + struct PipelineStorage : cute::aligned_struct<16> { + using MainloopPipelineStorage = typename CollectiveMainloop::PipelineStorage; + using EpiLoadPipelineStorage = typename CollectiveEpilogue::PipelineStorage; + using MathWarpGroupOrderBarrierStorage = typename MathWarpGroupOrderBarrier::SharedStorage; + + alignas(16) MainloopPipelineStorage mainloop; + alignas(16) EpiLoadPipelineStorage epi_load; + alignas(16) MathWarpGroupOrderBarrierStorage math_wg_order; + alignas(16) typename LoadWarpOrderBarrier::SharedStorage load_order; + } pipelines; + }; + + static constexpr int SharedStorageSize = sizeof(SharedStorage); + + // Device side arguments + struct Arguments { + GemmUniversalMode mode{}; + ProblemShape problem_shape{}; + MainloopArguments mainloop{}; + EpilogueArguments epilogue{}; + KernelHardwareInfo hw_info{}; + TileSchedulerArguments scheduler{}; + }; + + // Kernel entry point API + struct Params { + GemmUniversalMode mode; + ProblemShape problem_shape; + MainloopParams mainloop; + EpilogueParams epilogue; + KernelHardwareInfo hw_info; + TileSchedulerParams scheduler; + }; + + // + // Methods + // + + // Convert to underlying arguments. In this case, a simple copy for the aliased type. 
+ static + Params + to_underlying_arguments(Arguments const& args, void* workspace) { + CUTLASS_TRACE_HOST("to_underlying_arguments():"); + + (void) workspace; + auto problem_shape = args.problem_shape; + if constexpr (detail::IF_SWAP_AB::value) { + // swap M/N + get<0>(problem_shape) = get<1>(args.problem_shape); + get<1>(problem_shape) = get<0>(args.problem_shape); + } + auto problem_shape_MNKL = append<4>(problem_shape, 1); + + // Get SM count if needed, otherwise use user supplied SM count + int sm_count = args.hw_info.sm_count; + if (sm_count <= 0) { + CUTLASS_TRACE_HOST(" WARNING: Arguments do not include a valid SM count.\n" + " For optimal performance, populate the arguments KernelHardwareInfo struct with the SM count."); + sm_count = KernelHardwareInfo::query_device_multiprocessor_count(args.hw_info.device_id); + } + + CUTLASS_TRACE_HOST("to_underlying_arguments(): Setting persistent grid SM count to " << sm_count); + KernelHardwareInfo hw_info{args.hw_info.device_id, sm_count}; + + return { + args.mode, + problem_shape, + CollectiveMainloop::to_underlying_arguments(args.problem_shape, args.mainloop, workspace), + CollectiveEpilogue::to_underlying_arguments(args.problem_shape, args.epilogue, workspace), + hw_info, + TileScheduler::to_underlying_arguments(problem_shape_MNKL, TileShape{}, ClusterShape{}, hw_info, args.scheduler) + }; + } + + CUTLASS_HOST_DEVICE static + bool + can_implement(Arguments const& args) { + bool implementable = (args.mode == GemmUniversalMode::kGemm) or + (args.mode == GemmUniversalMode::kBatched && rank(ProblemShape{}) == 4); + if (!implementable) { + CUTLASS_TRACE_HOST(" CAN IMPLEMENT: Arguments or Problem Shape don't meet the requirements.\n"); + return implementable; + } + implementable &= CollectiveMainloop::can_implement(args.problem_shape, args.mainloop); + implementable &= CollectiveEpilogue::can_implement(args.problem_shape, args.epilogue); + return implementable; + } + + static + int + get_workspace_size(Arguments const& args) { + return 0; + } + + static + cutlass::Status + initialize_workspace(Arguments const& args, void* workspace = nullptr, cudaStream_t stream = nullptr) { + return Status::kSuccess; + } + + // Computes the kernel launch grid shape based on runtime parameters + static dim3 + get_grid_shape(Params const& params) { + // Given device SM count, set grid size s.t. we do not launch more thread blocks than we can run concurrently + TileSchedulerArguments args{}; + if constexpr (!std::is_const_v) { + args.max_swizzle_size = 1 << params.scheduler.log_swizzle_size_; + } + args.raster_order = params.scheduler.raster_order_ == TileScheduler::RasterOrder::AlongN ? TileScheduler::RasterOrderOptions::AlongN : TileScheduler::RasterOrderOptions::AlongM; + return TileScheduler::get_grid_shape(params.problem_shape, TileShape{}, ClusterShape{}, params.hw_info, args); + } + + static dim3 + get_block_shape() { + return dim3(MaxThreadsPerBlock, 1, 1); + } + + CUTLASS_DEVICE + void + operator()(Params const& params, char* smem_buf) { + using namespace cute; + using X = Underscore; + + // Any Tensor Op MMA Atom in the WGMMA ISA is arch conditional to sm90a. + #if ! defined(__CUDA_ARCH_FEAT_SM90_ALL) + if constexpr(size<0>(typename TiledMma::AtomShape_MNK{}) == 64) { + printf("ERROR : Arch conditional MMA instruction used without targeting sm90a compute capability. Aborting.\n"); + return; + } + #endif + + // Preconditions + static_assert(rank(StrideA{}) == 3, "StrideA must be rank-3: [M, K, L]. 
If batch mode is not needed, set L stride to Int<0>."); + static_assert(rank(StrideB{}) == 3, "StrideB must be rank-3: [N, K, L]. If batch mode is not needed, set L stride to Int<0>."); + static_assert(rank(StrideC{}) == 3, "StrideC must be rank-3: [M, N, L]. If batch mode is not needed, set L stride to Int<0>."); + static_assert(rank(StrideD{}) == 3, "StrideD must be rank-3: [M, N, L]. If batch mode is not needed, set L stride to Int<0>."); + + enum class WarpGroupRole { + Producer = 0, + Consumer0 = 1, + Consumer1 = 2 + }; + enum class ProducerWarpRole { + Mainloop = 0, + Warp1 = 1, + Epilogue = 2, + Warp3 = 3 + }; + + // Kernel level shared memory storage + SharedStorage& shared_storage = *reinterpret_cast(smem_buf); + + int thread_idx = int(threadIdx.x); + int lane_idx = canonical_lane_idx(); + int warp_idx = canonical_warp_idx_sync(); + int warp_idx_in_warp_group = warp_idx % NumWarpsPerWarpGroup; + int warp_group_thread_idx = thread_idx % NumThreadsPerWarpGroup; + auto warp_group_role = WarpGroupRole(canonical_warp_group_idx()); + auto producer_warp_role = ProducerWarpRole(warp_idx_in_warp_group); + int lane_predicate = cute::elect_one_sync(); + uint32_t block_rank_in_cluster = cute::block_rank_in_cluster(); + + // Issue Tma Descriptor Prefetch from a single thread + if ((warp_idx == 0) && lane_predicate) { + CollectiveMainloop::prefetch_tma_descriptors(params.mainloop); + CollectiveEpilogue::prefetch_tma_descriptors(params.epilogue); + } + + // Mainloop Load pipeline + using MainloopPipeline = typename CollectiveMainloop::MainloopPipeline; + typename MainloopPipeline::Params mainloop_pipeline_params; + if (warp_group_role == WarpGroupRole::Producer && producer_warp_role == ProducerWarpRole::Mainloop) { + mainloop_pipeline_params.role = MainloopPipeline::ThreadCategory::Producer; + } + if (warp_group_role == WarpGroupRole::Consumer0 || warp_group_role == WarpGroupRole::Consumer1) { + mainloop_pipeline_params.role = MainloopPipeline::ThreadCategory::Consumer; + } + mainloop_pipeline_params.is_leader = warp_group_thread_idx == 0; + mainloop_pipeline_params.num_consumers = NumThreadsPerWarpGroup; + mainloop_pipeline_params.transaction_bytes = CollectiveMainloop::TmaTransactionBytes; + MainloopPipeline mainloop_pipeline(shared_storage.pipelines.mainloop, mainloop_pipeline_params); + + // Epilogue Load pipeline + using EpiLoadPipeline = typename CollectiveEpilogue::LoadPipeline; + typename EpiLoadPipeline::Params epi_load_pipeline_params; + if (warp_group_role == WarpGroupRole::Producer && producer_warp_role == ProducerWarpRole::Epilogue) { + epi_load_pipeline_params.role = EpiLoadPipeline::ThreadCategory::Producer; + } + if (warp_group_role == WarpGroupRole::Consumer0 || warp_group_role == WarpGroupRole::Consumer1) { + epi_load_pipeline_params.role = EpiLoadPipeline::ThreadCategory::Consumer; + } + epi_load_pipeline_params.dst_blockid = cute::block_rank_in_cluster(); + epi_load_pipeline_params.producer_arv_count = NumThreadsPerWarp; + epi_load_pipeline_params.consumer_arv_count = NumThreadsPerWarpGroup; + epi_load_pipeline_params.transaction_bytes = CollectiveEpilogue::TmaTransactionBytes; + EpiLoadPipeline epi_load_pipeline(shared_storage.pipelines.epi_load, epi_load_pipeline_params); + + // Epilogue Store pipeline + using EpiStorePipeline = typename CollectiveEpilogue::StorePipeline; + typename EpiStorePipeline::Params epi_store_pipeline_params; + epi_store_pipeline_params.always_wait = true; + EpiStorePipeline epi_store_pipeline(epi_store_pipeline_params); + + typename 
LoadWarpOrderBarrier::Params params_load_order_barrier; + params_load_order_barrier.group_id = producer_warp_role == ProducerWarpRole::Mainloop ? 0 : 1; + params_load_order_barrier.group_size = NumThreadsPerWarp; + LoadWarpOrderBarrier load_order_barrier(shared_storage.pipelines.load_order, params_load_order_barrier); + + typename MathWarpGroupOrderBarrier::Params params_math_wg_order_barrier; + // DMA Load WG will not participate in these Ordered Barrier syncs + params_math_wg_order_barrier.group_id = canonical_warp_group_idx() - static_cast(WarpGroupRole::Consumer0); + params_math_wg_order_barrier.group_size = NumThreadsPerWarpGroup; // Number of threads / participants in a group + MathWarpGroupOrderBarrier math_wg_order_barrier(shared_storage.pipelines.math_wg_order, params_math_wg_order_barrier); + + // Initialize starting pipeline states for the collectives + // Epilogue store pipe is producer-only (consumer is TMA unit, waits via scoreboarding) + typename CollectiveMainloop::PipelineState mainloop_pipe_consumer_state; + typename CollectiveEpilogue::LoadPipelineState epi_load_pipe_consumer_state; + + // For the DMA Load (producer) we start with an opposite phase + // i.e., we skip all waits since we know that the buffer is indeed empty + PipelineState mainloop_pipe_producer_state = cutlass::make_producer_start_state(); + PipelineState epi_load_pipe_producer_state = cutlass::make_producer_start_state(); + PipelineState epi_store_pipe_producer_state = cutlass::make_producer_start_state(); + + auto cluster_wait_fn = [&] () { + // We need this to guarantee that the Pipeline init is visible + // To all producers and consumer thread blocks in the Cluster + if constexpr (size(ClusterShape{}) > 1) { + cute::cluster_arrive_relaxed(); + return [] () { cute::cluster_wait(); }; + } + else { + __syncthreads(); + return [] () {}; // do nothing + } + } (); + + // Separate out problem shape for convenience + // Optionally append 1s until problem shape is rank-4 in case its is only rank-3 (MNK) + auto problem_shape_MNKL = append<4>(params.problem_shape, Int<1>{}); + auto M = get<0>(problem_shape_MNKL); + auto N = get<1>(problem_shape_MNKL); + auto K = get<2>(problem_shape_MNKL); + auto L = get<3>(problem_shape_MNKL); + + // TMA requires special handling of strides to deal with coord codomain mapping + // Represent the full tensors -- get these from TMA + Tensor mA_mkl = params.mainloop.tma_load_a.get_tma_tensor(make_shape(M,K,L)); // (m,k,l) + Tensor mB_nkl = params.mainloop.tma_load_b.get_tma_tensor(make_shape(N,K,L)); // (n,k,l) + + // Get the appropriate blocks for this thread block -- potential for thread block locality + TiledMma tiled_mma; + auto blk_shape = TileShape{}; // (BLK_M,BLK_N,BLK_K) + + // Make tiled views, defer the slice + Tensor gA_mkl = local_tile(mA_mkl, blk_shape, make_coord(_,_,_), Step<_1, X,_1>{}); // (BLK_M,BLK_K,m,k,l) + Tensor gB_nkl = local_tile(mB_nkl, blk_shape, make_coord(_,_,_), Step< X,_1,_1>{}); // (BLK_N,BLK_K,n,k,l) + + // Get pipeline stage increments from tensor shapes + auto k_tile_count = size<3>(gA_mkl); + auto c_tile_count = CollectiveEpilogue::get_load_pipe_increment(blk_shape); + auto d_tile_count = CollectiveEpilogue::get_store_pipe_increment(blk_shape); + + TileScheduler scheduler{params.scheduler}; + + if (warp_group_role == WarpGroupRole::Consumer1) { + // Advance 2nd Math WG to the next work tile for the startup + scheduler.advance_to_next_work(); + // Advance 2nd Math WG pipeline states to the end of 1st Math WG + 
mainloop_pipe_consumer_state.advance(k_tile_count); + epi_load_pipe_consumer_state.advance(c_tile_count); + epi_store_pipe_producer_state.advance(d_tile_count); + } + auto work_tile_info = scheduler.get_current_work(); + + // In a warp specialized kernel, collectives expose data movement and compute operations separately + CollectiveMainloop collective_mainloop; + CollectiveEpilogue collective_epilogue(params.epilogue, shared_storage.tensors.epilogue); + + // Wait for all thread blocks in the Cluster + cluster_wait_fn(); + + if (warp_group_role == WarpGroupRole::Producer) { + cutlass::arch::warpgroup_reg_dealloc(); + + // Mainloop Producer Warp + if (producer_warp_role == ProducerWarpRole::Mainloop) { + bool do_load_order_arrive = true; + while (work_tile_info.is_valid_tile) { + // Compute m_coord, n_coord, l_coord with the post-tiled m-shape and n-shape + auto m_coord = idx2crd(work_tile_info.M_idx, shape<2>(gA_mkl)); + auto n_coord = idx2crd(work_tile_info.N_idx, shape<2>(gB_nkl)); + auto l_coord = idx2crd(work_tile_info.L_idx, shape<4>(gB_nkl)); + auto blk_coord = make_coord(m_coord, n_coord, _, l_coord); + + // Slice with our work tile coordinates to construct mainloop tensor views + Tensor gA = gA_mkl(_,_,m_coord,_,l_coord); // (BLK_M,BLK_K,k) + Tensor gB = gB_nkl(_,_,n_coord,_,l_coord); // (BLK_N,BLK_K,k) + + auto k_tile_iter = cute::make_coord_iterator(shape<2>(gA)); + + collective_mainloop.load( + mainloop_pipeline, + mainloop_pipe_producer_state, + gA, params.mainloop.tma_load_a, + gB, params.mainloop.tma_load_b, + k_tile_iter, k_tile_count, + lane_idx, + block_rank_in_cluster, + shared_storage.tensors.mainloop + ); + // Update starting pipeline state for the next tile + mainloop_pipe_producer_state.advance(k_tile_count); + + // Signal for the epilogue load warp to begin + if (do_load_order_arrive) { + load_order_barrier.arrive(); + do_load_order_arrive = false; + } + + // Get next work tile + scheduler.advance_to_next_work(); + work_tile_info = scheduler.get_current_work(); + } // Scheduler work fetch loop + + // Make sure all Consumer Warp Groups have been waited upon + collective_mainloop.load_tail(mainloop_pipeline, mainloop_pipe_producer_state); + } // Mainloop Producer Warp End + + // Epilogue Producer Warp + else if (producer_warp_role == ProducerWarpRole::Epilogue && collective_epilogue.is_producer_load_needed()) { + load_order_barrier.wait(); + while (work_tile_info.is_valid_tile) { + // Compute m_coord, n_coord, l_coord with the post-tiled m-shape and n-shape + auto m_coord = idx2crd(work_tile_info.M_idx, shape<2>(gA_mkl)); + auto n_coord = idx2crd(work_tile_info.N_idx, shape<2>(gB_nkl)); + auto l_coord = idx2crd(work_tile_info.L_idx, shape<4>(gB_nkl)); + auto blk_coord = make_coord(m_coord, n_coord, _, l_coord); + + epi_load_pipe_producer_state = + collective_epilogue.load( + epi_load_pipeline, + epi_load_pipe_producer_state, + problem_shape_MNKL, + blk_shape, + blk_coord, + tiled_mma, + lane_idx, + shared_storage.tensors.epilogue + ); + + // Get next work tile + scheduler.advance_to_next_work(); + work_tile_info = scheduler.get_current_work(); + } // Scheduler work fetch loop + + // Make sure all Consumer Warp Groups have been waited upon + collective_epilogue.load_tail(epi_load_pipeline, epi_load_pipe_producer_state); + } // Epilogue Producer Warp End + } // Producer Warp Group End + + else if (warp_group_role == WarpGroupRole::Consumer0 || warp_group_role == WarpGroupRole::Consumer1) { + cutlass::arch::warpgroup_reg_alloc(); + + while (work_tile_info.is_valid_tile) { 
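+        // Roadmap for the loop body below (each of the two math warp groups runs this;
+        // they ping-pong through math_wg_order_barrier): wait for our MMA turn, issue the
+        // MMAs for this tile, arrive to hand the MMA stage to the peer warp group; then
+        // wait for our epilogue turn, store the tile, and arrive again. One warp group's
+        // epilogue thus overlaps the other's mainloop, helping hide epilogue latency.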
+ // Compute m_coord, n_coord, l_coord with the post-tiled m-shape and n-shape + auto m_coord = idx2crd(work_tile_info.M_idx, shape<2>(gA_mkl)); + auto n_coord = idx2crd(work_tile_info.N_idx, shape<2>(gB_nkl)); + auto l_coord = idx2crd(work_tile_info.L_idx, shape<4>(gB_nkl)); + auto blk_coord = make_coord(m_coord, n_coord, _, l_coord); + + // Allocate the the accumulators for the (M,N) blk_shape + Tensor accumulators = partition_fragment_C(tiled_mma, take<0,2>(blk_shape)); // (MMA,MMA_M,MMA_N) + + // Order two Math WG's MMA one after the other, helps hide Epilogue + math_wg_order_barrier.wait(); + + collective_mainloop.mma( + mainloop_pipeline, + mainloop_pipe_consumer_state, + accumulators, + k_tile_count, + thread_idx, + shared_storage.tensors.mainloop, + params.mainloop + ); + + // Cue for next Math WG's MMA to start + math_wg_order_barrier.arrive(); + + // Make sure the math instructions are done and free buffers before entering the epilogue + collective_mainloop.mma_tail( + mainloop_pipeline, + mainloop_pipe_consumer_state, + k_tile_count + ); + // Update starting mainloop pipeline state for the next tile + mainloop_pipe_consumer_state.advance(k_tile_count * NumMmaWarpGroups); + + // Order two Math WG's Epilogue one after the other + math_wg_order_barrier.wait(); + + // Epilogue and write to gD + auto [epi_load_pipe_consumer_state_next, epi_store_pipe_producer_state_next] = + collective_epilogue.store( + epi_load_pipeline, + epi_load_pipe_consumer_state, + epi_store_pipeline, + epi_store_pipe_producer_state, + problem_shape_MNKL, + blk_shape, + blk_coord, + accumulators, + tiled_mma, + warp_group_thread_idx, + shared_storage.tensors.epilogue + ); + + // TMA store pipeline wait is only visible to TMA-issuing warp, so for multiple-consumer kernels + // we need to wait for all TMA stores to complete before issuing consumer order barrier arrives + // to ensure next math consumer doesn't overwrite smem of in-flight TMA stores of current consumer. 
+ auto [epi_load_pipe_consumer_state_next_, epi_store_pipe_producer_state_next_] = + collective_epilogue.store_tail( + epi_load_pipeline, + epi_load_pipe_consumer_state_next, + epi_store_pipeline, + epi_store_pipe_producer_state_next + ); + + // Update starting load/store pipeline states for the next tile + // state has already been incremented by 1 tile in collective calls, advance once again for ping pong + epi_load_pipe_consumer_state = epi_load_pipe_consumer_state_next_; + epi_store_pipe_producer_state = epi_store_pipe_producer_state_next_; + epi_load_pipe_consumer_state.advance(c_tile_count); + epi_store_pipe_producer_state.advance(d_tile_count); + + // Cue for next Math WG's Epilogue to start + math_wg_order_barrier.arrive(); + + // Get next work tile + scheduler.advance_to_next_work(NumMmaWarpGroups); + work_tile_info = scheduler.get_current_work(); + } // Scheduler work fetch loop + } // Consumer Warp Groups End + } +}; + +/////////////////////////////////////////////////////////////////////////////// + +} // namespace cutlass::gemm::kernel diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/sm90_tile_scheduler.hpp b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/sm90_tile_scheduler.hpp new file mode 100644 index 0000000000000000000000000000000000000000..8fb60d9004ac926dff9b09ee8a2e2228cb78607c --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/sm90_tile_scheduler.hpp @@ -0,0 +1,303 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +#pragma once + +#include "cutlass/fast_math.h" +#include "cutlass/gemm_coord.hpp" +#include "cutlass/kernel_hardware_info.hpp" +#include "cutlass/gemm/kernel/tile_scheduler_params.h" +#include "cute/layout.hpp" +#include "cute/tensor.hpp" +#include "cute/arch/cluster_sm90.hpp" + +namespace cutlass::gemm::kernel::detail { + +/////////////////////////////////////////////////////////////////////////////// + +// Persistent Thread Block (TB) scheduler +class PersistentTileSchedulerSm90 { + // + // Data members + // + +private: + uint64_t current_work_linear_idx_; + +public: + struct WorkTileInfo { + int32_t M_idx = 0; + int32_t N_idx = 0; + int32_t L_idx = 0; + bool is_valid_tile = false; + }; + + using Params = PersistentTileSchedulerSm90Params; + using RasterOrder = typename Params::RasterOrder; + using RasterOrderOptions = typename Params::RasterOrderOptions; + + struct Arguments { + int max_swizzle_size = 1; + RasterOrderOptions raster_order = RasterOrderOptions::Heuristic; + }; + + // Sink scheduler params as a member + Params scheduler_params; + + // + // Methods + // + + template + static Params + to_underlying_arguments( + ProblemShapeMNKL problem_shape_mnkl, + TileShape tile_shape, + ClusterShape cluster_shape, + [[maybe_unused]] KernelHardwareInfo const& hw_info, + Arguments const& arguments, + [[maybe_unused]] void* workspace=nullptr) { + + // We only need the tile and cluster shape during scheduler setup, so let FTAD do the magic + static_assert(cute::is_static::value); + static_assert(cute::is_static::value); + + dim3 problem_blocks = get_tiled_cta_shape_mnl(problem_shape_mnkl, tile_shape, cluster_shape); + + Params params; + params.initialize( + problem_blocks, + to_gemm_coord(cluster_shape), + hw_info, + arguments.max_swizzle_size, + arguments.raster_order + ); + + return params; + } + + CUTLASS_HOST_DEVICE + PersistentTileSchedulerSm90() { }; + + CUTLASS_DEVICE explicit PersistentTileSchedulerSm90(Params const& params_) : scheduler_params(params_) { + // MSVC requires protecting use of CUDA-specific nonstandard syntax, + // like blockIdx and gridDim, with __CUDA_ARCH__. 
+#if defined(__CUDA_ARCH__) + if (params_.raster_order_ == RasterOrder::AlongN) { + current_work_linear_idx_ = uint64_t(blockIdx.x) + uint64_t(blockIdx.y) * uint64_t(gridDim.x); + } + else { + current_work_linear_idx_ = uint64_t(blockIdx.x) * uint64_t(gridDim.y) + uint64_t(blockIdx.y); + } +#else + CUTLASS_ASSERT(false && "This line should never be reached"); +#endif + } + + CUTLASS_DEVICE + WorkTileInfo + get_current_work() const { + return get_current_work_for_linear_idx(current_work_linear_idx_); + } + + CUTLASS_DEVICE + WorkTileInfo + get_current_work_for_linear_idx(uint64_t linear_idx) const { + // Map worker's linear index into the CTA tiled problem shape to the corresponding MNL indices + uint64_t work_idx_l, remainder; + scheduler_params.divmod_batch_(work_idx_l, remainder, linear_idx); + + uint64_t blk_per_grid_dim = scheduler_params.divmod_cluster_shape_minor_.divide(remainder); + + auto [work_idx_m, work_idx_n] = get_work_idx_m_and_n(blk_per_grid_dim, + scheduler_params.divmod_cluster_shape_major_, + scheduler_params.divmod_cluster_shape_minor_, + scheduler_params.divmod_cluster_blk_major_, + scheduler_params.log_swizzle_size_, + scheduler_params.raster_order_); + + return {work_idx_m, work_idx_n, static_cast(work_idx_l), linear_idx < scheduler_params.blocks_per_problem_}; + } + + CUTLASS_DEVICE + void + advance_to_next_work(uint32_t advance_count = 1) { + // MSVC requires protecting use of CUDA-specific nonstandard syntax, + // like blockIdx and gridDim, with __CUDA_ARCH__. +#if defined(__CUDA_ARCH__) + current_work_linear_idx_ += uint64_t(gridDim.x) * uint64_t(gridDim.y) * uint64_t(gridDim.z) * uint64_t(advance_count); +#else + CUTLASS_ASSERT(false && "This line should never be reached"); +#endif + } + + // get work_idx_m, work_idx_n from blk_per_grid_dim while applying swizzle + static CUTLASS_DEVICE + cute::tuple + get_work_idx_m_and_n( + uint64_t blk_per_grid_dim, + FastDivmodU64 const& divmod_cluster_shape_major, + FastDivmodU64 const& divmod_cluster_shape_minor, + FastDivmodU64 const& divmod_cluster_blk_major, + int32_t log_swizzle_size, + RasterOrder raster_order) { + + uint64_t cluster_id, cluster_major_offset = 0, cluster_minor_offset = 0; + divmod_cluster_shape_major(cluster_id, cluster_major_offset, blk_per_grid_dim); + + auto [cta_m_in_cluster, cta_n_in_cluster, _] = cute::block_id_in_cluster(); + if (raster_order == RasterOrder::AlongN) { + cluster_minor_offset = cta_m_in_cluster; + } + else { + cluster_minor_offset = cta_n_in_cluster; + } + + uint64_t cluster_idx_minor, cluster_idx_major; + + uint64_t cluster_idx_minor_div_swizzle, extra, offset; + + offset = cluster_id & ((1 << log_swizzle_size) - 1); + extra = cluster_id >> log_swizzle_size; + + divmod_cluster_blk_major(cluster_idx_minor_div_swizzle, cluster_idx_major, extra); + + cluster_idx_minor = cluster_idx_minor_div_swizzle * (1 << log_swizzle_size) + offset; + + auto minor_work_idx = static_cast(cluster_idx_minor * divmod_cluster_shape_minor.divisor + + cluster_minor_offset); + auto major_work_idx = static_cast(cluster_idx_major * divmod_cluster_shape_major.divisor + + cluster_major_offset); + + if (raster_order == RasterOrder::AlongN) { + return {minor_work_idx, major_work_idx}; + } + else { + return {major_work_idx, minor_work_idx}; + } + + } + + // Given the inputs, computes the total number of output blocks this problem will compute over + // Note that this is only the logical size of our grid, not the physical grid we will actually launch. 
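+  // For example (illustrative numbers): a 1024 x 1024 x K problem with a 128 x 128 CTA
+  // tile and a 1x1 cluster yields an 8 x 8 = 64-tile logical grid per batch, whereas the
+  // persistent grid actually launched (see get_grid_shape below) is typically capped so
+  // that no more thread blocks are launched than can run concurrently on the device's SMs.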
+ template + CUTLASS_HOST_DEVICE static + dim3 + get_tiled_cta_shape_mnl(ProblemShapeMNKL problem_shape_mnkl, BlockShape cta_shape, ClusterShape cluster_shape) { + auto cta_m = cute::size(cute::ceil_div(cute::shape<0>(problem_shape_mnkl), cute::shape<0>(cta_shape))); + auto cta_n = cute::size(cute::ceil_div(cute::shape<1>(problem_shape_mnkl), cute::shape<1>(cta_shape))); + + return Params::get_tiled_cta_shape_mnl( + to_gemm_coord(problem_shape_mnkl), + to_gemm_coord(cluster_shape), + cta_m, cta_n + ); + } + + // Given the inputs, computes the physical grid we should launch. + template + CUTLASS_HOST_DEVICE static + dim3 + get_grid_shape( + ProblemShapeMNKL problem_shape_mnk, + BlockShape cta_shape, + ClusterShape cluster_shape, + KernelHardwareInfo hw_info, + Arguments arguments, + bool truncate_by_problem_size=true) { + + auto problem_shape_mnkl = cute::append<4>(problem_shape_mnk, cute::Int<1>{}); + dim3 problem_blocks = get_tiled_cta_shape_mnl(problem_shape_mnkl, cta_shape, cluster_shape); + + return Params::get_grid_shape( + problem_blocks, + to_gemm_coord(cluster_shape), + hw_info, + arguments.max_swizzle_size, + arguments.raster_order, + /* truncate_by_problem_size = */true + ); + } + + // Returns whether the block assigned this work should compute the epilogue for the corresponding + // output tile. For the basic tile scheduler, this is always true. + CUTLASS_HOST_DEVICE + static bool + compute_epilogue(WorkTileInfo const&) { + return true; + } + + // Performs the reduction across splits for a given output tile. Since this scheduler does + // not split output tiles, no reduction is needed. + template + CUTLASS_DEVICE + static void + fixup(Params const&, WorkTileInfo const&, FrgTensorC&, uint32_t, uint32_t) {} + + // Returns whether the current WorkTileInfo passed in should continue to be used. Since + // this scheduler only schedules work in units of single, full output tiles, the WorkTileInfo + // passed in should not be used after having been processed. + CUTLASS_DEVICE + static bool + continue_current_work(WorkTileInfo&) { + return false; + } + + // The basic tile scheduler does not require any additional workspace + template + static int + get_workspace_size(Arguments const&, ProblemShape, KernelHardwareInfo const&, uint32_t) { + return 0; + } + + template + static cutlass::Status + initialize_workspace(Arguments const&, void*, cudaStream_t, ProblemShape, KernelHardwareInfo const&, uint32_t) { + return Status::kSuccess; + } + + template + CUTLASS_HOST_DEVICE + static int + get_work_k_tile_count(WorkTileInfo const& work_tile_info, ProblemShape problem_shape, TileShape tile_shape) { + // All work units returned by this scheduler cover the entire K iteration + // space of the output tile assigned to the work unit. 
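+  // For example, K = 4096 with a K-tile extent of 64 gives ceil_div(4096, 64) = 64 k-tiles
+  // for every work unit handed out by this scheduler, regardless of which output tile it
+  // maps to.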
+ return cute::size(cute::ceil_div(cute::get<2>(problem_shape), cute::get<2>(tile_shape))); + } + + CUTLASS_HOST_DEVICE + static uint32_t + get_work_k_tile_start(WorkTileInfo const&) { + // All work units returned by this scheduler start from K tile 0 + return 0u; + } +}; + +} // namespace cutlass::gemm::kernel::detail diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/sm90_tile_scheduler_stream_k.hpp b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/sm90_tile_scheduler_stream_k.hpp new file mode 100644 index 0000000000000000000000000000000000000000..ff9cb20972eff633b6860d959ddebab841fa5e64 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/sm90_tile_scheduler_stream_k.hpp @@ -0,0 +1,740 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ + +#pragma once + +#include "cutlass/barrier.h" +#include "cutlass/block_striped.h" +#include "cutlass/fast_math.h" +#include "cutlass/gemm/kernel/sm90_tile_scheduler.hpp" +#include "cutlass/kernel_hardware_info.hpp" +#include "cute/layout.hpp" +#include "cute/tensor.hpp" + +namespace cutlass::gemm::kernel::detail { + +// Persistent Thread Block (TB) scheduler leveraging stream-K decomposition +template < + class TileShape, + class ClusterShape +> +class PersistentTileSchedulerSm90StreamK { + // + // Data members + // + +private: + using UnderlyingScheduler = PersistentTileSchedulerSm90; + +private: + using UnderlyingArguments = typename UnderlyingScheduler::Arguments; + using UnderlyingParams = typename UnderlyingScheduler::Params; + + uint64_t current_work_linear_idx_ = 0; + +public: + + using RasterOrder = UnderlyingScheduler::RasterOrder; + using RasterOrderOptions = UnderlyingScheduler::RasterOrderOptions; + + // Use a dummy barrier manager to simply get the type used to store the barrier + using BarrierType = typename NamedBarrierManager<1>::T; + + struct WorkTileInfo { + int32_t M_idx = 0; + int32_t N_idx = 0; + int32_t K_idx = 0; + int32_t L_idx = 0; + bool is_valid_tile = false; + + // Number of splits to be used in computing the {L_idx, M_idx, N_idx} output tile. + // Splits = 1 indicates that this is a data-parallel block. + uint32_t splits = 1; + + // Number of k iterations to compute for the current tile + uint32_t k_tile_count = 0; + + // Number of k iterations remaining for the work unit as a whole + uint32_t k_tile_remaining = 0; + + // Whether this unit of work is the final split for the given tile + bool is_final_split = true; + }; + + using Params = PersistentTileSchedulerSm90StreamKParams; + using ReductionMode = Params::ReductionMode; + + struct Arguments { + + Arguments() = default; + Arguments(Arguments const&) = default; + Arguments(Arguments&&) = default; + + CUTLASS_HOST_DEVICE + Arguments& + operator=(Arguments const& args) { + splits = args.splits; + raster_order = args.raster_order; + return *this; + } + + CUTLASS_HOST_DEVICE + Arguments& + operator=(Arguments&& args) noexcept { + splits = args.splits; + raster_order = args.raster_order; + return *this; + } + + CUTLASS_HOST_DEVICE + Arguments(int splits_) : splits(splits_) {} + + // The splitting factor to be used in a split-K decomposition of the problem. + // If this is set to a value greater than 1, stream-K decomposition logic + // is bypassed in favor of a split-K decomposition. 
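// Illustrative sketch (hypothetical values, not part of the upstream CUTLASS source):
// with splits = 4 and an output tile that spans 64 K tiles, each output tile is covered
// by four work units over K tiles [0,16), [16,32), [32,48) and [48,64), and the partial
// accumulators are combined by the reduction/fixup path defined later in this class
// rather than by the stream-K assignment.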
+ int splits = 1; + const int max_swizzle_size = 1; + RasterOrderOptions raster_order = RasterOrderOptions::Heuristic; + ReductionMode reduction_mode = ReductionMode::Deterministic; + }; + + // Sink scheduler params as a member + Params scheduler_params; + + // + // Methods + // + + template + static Params + to_underlying_arguments( + ProblemShape problem_shape, + TileShape tile_shape, + ClusterShape cluster_shape, + KernelHardwareInfo const& hw_info, + Arguments const& args, + void* workspace) { + + static_assert(cute::is_static::value); + static_assert(cute::is_static::value); + + auto problem_shape_mnkl = cute::append<4>(problem_shape, cute::Int<1>{}); + dim3 problem_blocks = get_tiled_cta_shape_mnl(problem_shape_mnkl, tile_shape, cluster_shape); + uint32_t k_tile_per_output_tile = cute::size(cute::ceil_div(cute::shape<2>(problem_shape_mnkl), cute::shape<2>(TileShape{}))); + + Params params; + params.initialize( + problem_blocks, + k_tile_per_output_tile, + to_gemm_coord(cluster_shape), + hw_info, + args.splits, + args.max_swizzle_size, + args.raster_order, + args.reduction_mode, + workspace + ); + return params; + } + + CUTLASS_HOST_DEVICE + PersistentTileSchedulerSm90StreamK() { }; + + CUTLASS_HOST_DEVICE + PersistentTileSchedulerSm90StreamK(Params const& params_) : scheduler_params(params_) { + if (params_.raster_order_ == RasterOrder::AlongN) { + current_work_linear_idx_ = uint64_t(blockIdx.x) + uint64_t(blockIdx.y) * uint64_t(gridDim.x); + } + else { + current_work_linear_idx_ = uint64_t(blockIdx.x) * uint64_t(gridDim.y) + uint64_t(blockIdx.y); + } + } + + CUTLASS_DEVICE + WorkTileInfo + get_current_work() const { + return get_current_work_for_linear_idx(current_work_linear_idx_, scheduler_params); + } + + CUTLASS_DEVICE + static WorkTileInfo + get_current_work_for_linear_idx(uint64_t linear_idx, Params const& params) { + if (linear_idx >= params.units_per_problem_) { + // Invalid work. Return an empty result. + return {0, 0, 0, 0, false, 0}; + } + + // Determine whether this work unit is a data-parallel or stream-K work unit + bool is_stream_k_unit = linear_idx < params.sk_units_; + + bool is_split_k = params.splits_ > 1; + + if (is_split_k || !is_stream_k_unit) { + // Bypass the stream-K scheduling logic for basic data-parallel or split-K work + return set_non_stream_k_work(linear_idx, params, is_split_k); + } + else { + // This is a stream-K work unit + WorkTileInfo work_tile_info; + set_stream_k_work(params, linear_idx, work_tile_info, /*new_unit = */ true); + return work_tile_info; + } + } + + // Returns whether the current work_tile_info passed in should continue to be used. This + // occurs only in the stream-K decomposition with stream-K work units, which encompass + // work over multiple output tiles. If the current work_tile_info should continue to be + // used, it is updated to advance to the next output tile it should cover. 
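// Illustrative walk-through (hypothetical numbers, not part of the upstream CUTLASS
// source): a stream-K unit that starts with k_tile_remaining = 9 and whose first
// assignment covers k_tile_count = 2 K tiles has 9 - 2 = 7 K tiles left after that
// output tile is processed, so the function below returns true and set_stream_k_work()
// retargets the same work_tile_info at the next (earlier) output tile; only once the
// remaining count reaches zero does the caller move on via advance_to_next_work().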
+ CUTLASS_DEVICE + bool + continue_current_work(WorkTileInfo& work_tile_info) const { + return continue_current_work_for_linear_idx( + current_work_linear_idx_, work_tile_info, scheduler_params); + } + + CUTLASS_DEVICE static + bool + continue_current_work_for_linear_idx( + uint64_t linear_idx, + WorkTileInfo& work_tile_info, + Params const& params) { + + work_tile_info.k_tile_remaining -= work_tile_info.k_tile_count; + + if (work_tile_info.k_tile_remaining == 0) { + return false; + } + + set_stream_k_work(params, linear_idx, work_tile_info, /* new_unit = */ false); + return true; + } + + CUTLASS_DEVICE + void + advance_to_next_work(uint32_t advance_count = 1) { + current_work_linear_idx_ += uint64_t(gridDim.x) * uint64_t(gridDim.y) * uint64_t(gridDim.z) * uint64_t(advance_count); + } + + // Given the inputs, computes the total number of output blocks this problem will compute over + // Note that this is only the logical size of our grid, not the physical grid we will actually launch. + template + CUTLASS_HOST_DEVICE static + dim3 + get_tiled_cta_shape_mnl(ProblemShape problem_shape_mnkl, TileShape cta_shape, ClusterShape cluster_shape) { + return UnderlyingScheduler::get_tiled_cta_shape_mnl(problem_shape_mnkl, cta_shape, cluster_shape); + } + + // Given the cluster shape, computes the physical grid we should launch. + template + CUTLASS_HOST_DEVICE static + dim3 + get_grid_shape( + ProblemShape problem_shape, + TileShape tile_shape, + ClusterShape cluster_shape, + KernelHardwareInfo hw_info, + Arguments arguments) { + + auto problem_shape_mnkl = cute::append<4>(problem_shape, cute::Int<1>{}); + dim3 problem_blocks = get_tiled_cta_shape_mnl(problem_shape_mnkl, tile_shape, cluster_shape); + + return Params::get_grid_shape( + problem_blocks, + to_gemm_coord(cluster_shape), + hw_info, + arguments.max_swizzle_size, + arguments.raster_order + ); + } + + // Returns whether fixup is needed for `work_tile_info`. + CUTLASS_HOST_DEVICE + static bool + requires_fixup(Params const& params, WorkTileInfo const& work_tile_info) { + // Fixup is not needed for data-parallel tiles + return work_tile_info.k_tile_count != params.k_tiles_per_output_tile_; + } + + // Performs the reduction across splits for a given output tile. + template + CUTLASS_DEVICE + static void + fixup( + Params const& params, + WorkTileInfo const& work_tile_info, + FrgTensorC& accumulators, + uint32_t num_barriers, + uint32_t barrier_idx) { + using BarrierManager = NamedBarrierManager; + return fixup_helper( + params, work_tile_info, accumulators, num_barriers, barrier_idx); + } + + // Helper for performing the reduction across splits for a given output tile. + template + CUTLASS_DEVICE + static void + fixup_helper( + Params const& params, + WorkTileInfo const& work_tile_info, + FrgTensorC& accumulators, + uint32_t num_barriers, + uint32_t barrier_idx) { + + using ElementAccumulator = typename FrgTensorC::value_type; + + if (!requires_fixup(params, work_tile_info)) { + return; + } + + auto tile_idx = output_tile_index(params, work_tile_info); + + // Index of the lock on which to wait + auto lock_idx = (tile_idx * num_barriers) + barrier_idx; + + // Reductions use BlockStripedReduce with a width of BarrierManager::ThreadCount under the hood. + // Thus, the start of the reduction space is the same across all threads in a warp group. 
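// Illustrative numbers (hypothetical, not part of the upstream CUTLASS source): with a
// 128x128 output tile, tile_idx = 3, 128 accumulator elements per thread fragment,
// barrier_idx = 1 and BarrierManager::ThreadCount = 128, the offset computed below is
// 128*128*3 + 128*1*128 = 49152 + 16384 = 65536 elements, so every (output tile,
// warp group) pair reduces into its own disjoint slice of the reduction workspace.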
+ int reduction_offset = + (cute::size<0>(TileShape{}) * cute::size<1>(TileShape{}) * tile_idx) + + (size(accumulators) * barrier_idx * BarrierManager::ThreadCount); + + ElementAccumulator* group_reduction_workspace = reinterpret_cast(params.reduction_workspace_) + reduction_offset; + + using AccumulatorArrayT = Array; + using BlockStripedReduceT = BlockStripedReduce; + + AccumulatorArrayT* reduction_workspace_array = reinterpret_cast(group_reduction_workspace); + AccumulatorArrayT* accumulator_array = reinterpret_cast(&accumulators); + + int barrier_group_thread_idx = threadIdx.x % BarrierManager::ThreadCount; + + // The number of tiles for which reduction is required is either: + // (a) the total number of output tiles (in the case of split-K) + // (b) the number of stream-K tiles + // To calculate the total number of output tiles in the split-K case, we + // note that, in the split-K case, the units_per_problem_ member of Params will be + // the total number of output tiles multiplied by the number of splits. + auto reduction_tiles = params.splits_ > 1 ? (params.units_per_problem_ / params.splits_) : params.sk_tiles_; + auto reduction_workspace_size = Params::get_reduction_workspace_size( + reduction_tiles, to_gemm_coord(TileShape{}), sizeof_bits::value); + BarrierType* lock_workspace = reinterpret_cast( + reinterpret_cast(params.reduction_workspace_) + reduction_workspace_size); + + if (!work_tile_info.is_final_split) { + if (work_tile_info.K_idx == 0) { + // First peer initializes the workspace partials + BlockStripedReduceT::store(reduction_workspace_array, *accumulator_array, barrier_group_thread_idx); + } + else { + if (params.reduction_mode_ == ReductionMode::Deterministic) { + // Wait until the preceding split added its accumulators + BarrierManager::wait_eq(barrier_idx, lock_workspace, barrier_group_thread_idx, lock_idx, work_tile_info.K_idx); + } + else { + // Wait until the first split has stored its accumulators + BarrierManager::wait_lt(barrier_idx, lock_workspace, barrier_group_thread_idx, lock_idx, 1); + } + + // Perform reduction in workspace + BlockStripedReduceT::reduce(reduction_workspace_array, *accumulator_array, barrier_group_thread_idx); + } + + // Signal our arrival + BarrierManager::arrive_inc(barrier_idx, lock_workspace, barrier_group_thread_idx, lock_idx, work_tile_info.k_tile_count); + } + else { + // Wait until the preceding split added its accumulators + BarrierManager::wait_eq(barrier_idx, lock_workspace, barrier_group_thread_idx, lock_idx, work_tile_info.K_idx); + + // The block computing the final split for the tile adds previously-reduced partials + // to its accumulators and computes the epilogue. + BlockStripedReduceT::load_add(*accumulator_array, reduction_workspace_array, barrier_group_thread_idx); + } + } + + // Returns whether the block assigned this work should compute the epilogue for the corresponding + // output tile. For the case of stream-K, this should only occur if the work is marked as the final split.
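// Illustrative example (not part of the upstream CUTLASS source): for an output tile
// spanning 64 K tiles, the stream-K unit that computes the last slice, say K tiles
// [48, 64), sees is_final_split == true, waits for the other contributors, adds their
// partials from the reduction workspace, and runs the epilogue; a unit covering only
// [0, 48) deposits partials and skips the epilogue.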
+ CUTLASS_HOST_DEVICE + static bool + compute_epilogue(WorkTileInfo const& work_tile_info) { + return work_tile_info.is_final_split; + } + + // Returns the linearized index of the output tile corresponding to the tile with offset [L, M, K] + CUTLASS_DEVICE + static int + output_tile_index(Params const& params, WorkTileInfo const& work_tile_info) { + if (params.splits_ > 1) { + auto tiles_mn = params.divmod_batch_.divisor / params.splits_; + if (params.raster_order_ == RasterOrder::AlongN) { + return + (tiles_mn * work_tile_info.L_idx) + + (params.divmod_cluster_shape_major_.divisor * + params.divmod_cluster_blk_major_.divisor * work_tile_info.M_idx) + + work_tile_info.N_idx; + } + else { + return + (tiles_mn * work_tile_info.L_idx) + + (params.divmod_cluster_shape_major_.divisor * + params.divmod_cluster_blk_major_.divisor * work_tile_info.N_idx) + + work_tile_info.M_idx; + } + } + else { + auto [cta_m_in_cluster, cta_n_in_cluster, _] = cute::block_id_in_cluster(); + + uint64_t cta_per_grid_dim; + uint64_t cluster_dim_idx; + if (params.raster_order_ == RasterOrder::AlongN) { + uint64_t block_idx_m = (work_tile_info.M_idx - cta_m_in_cluster) / params.divmod_cluster_shape_minor_.divisor; + uint64_t block_idx_n = work_tile_info.N_idx; + cta_per_grid_dim = (params.divmod_cluster_shape_major_.divisor * + params.divmod_cluster_blk_major_.divisor * block_idx_m) + block_idx_n; + cluster_dim_idx = cta_m_in_cluster; + } + else { + uint64_t block_idx_m = work_tile_info.M_idx; + uint64_t block_idx_n = (work_tile_info.N_idx - cta_n_in_cluster) / params.divmod_cluster_shape_minor_.divisor; + cta_per_grid_dim = (params.divmod_cluster_shape_major_.divisor * + params.divmod_cluster_blk_major_.divisor * block_idx_n) + block_idx_m; + cluster_dim_idx = cta_n_in_cluster; + } + + uint64_t tile_in_batch = params.divmod_cluster_shape_minor_.divisor * cta_per_grid_dim; + return params.divmod_batch_.divisor * work_tile_info.L_idx + tile_in_batch + cluster_dim_idx; + } + } + + template + static int + get_workspace_size( + Arguments const& args, + ProblemShape problem_shape, + KernelHardwareInfo const& hw_info, + uint32_t mma_warp_groups) { + + auto problem_shape_mnkl = cute::append<4>(problem_shape, 1); + + ClusterShape cluster_shape; + TileShape tile_shape; + + dim3 problem_blocks = get_tiled_cta_shape_mnl(problem_shape_mnkl, tile_shape, cluster_shape); + uint32_t k_tile_per_output_tile = cute::size(cute::ceil_div(cute::shape<2>(problem_shape_mnkl), cute::shape<2>(TileShape{}))); + + return Params::get_workspace_size( + problem_blocks, + k_tile_per_output_tile, + to_gemm_coord(tile_shape), + to_gemm_coord(cluster_shape), + hw_info, + args.splits, + args.max_swizzle_size, + args.raster_order, + mma_warp_groups, + sizeof_bits::value, + sizeof_bits::value + ); + } + + template + static cutlass::Status + initialize_workspace( + Arguments const& args, + void* workspace, + cudaStream_t stream, + ProblemShape const& problem_shape, + KernelHardwareInfo const& hw_info, + uint32_t mma_warp_groups) { + + auto problem_shape_mnkl = cute::append<4>(problem_shape, 1); + + ClusterShape cluster_shape; + TileShape tile_shape; + + dim3 problem_blocks = get_tiled_cta_shape_mnl(problem_shape_mnkl, tile_shape, cluster_shape); + uint32_t k_tile_per_output_tile = cute::size(cute::ceil_div(cute::shape<2>(problem_shape_mnkl), cute::shape<2>(TileShape{}))); + + return Params::initialize_workspace( + workspace, + stream, + problem_blocks, + k_tile_per_output_tile, + to_gemm_coord(tile_shape), + to_gemm_coord(cluster_shape), + hw_info, + 
args.splits, + args.max_swizzle_size, + args.raster_order, + mma_warp_groups, + sizeof_bits::value, + sizeof_bits::value + ); + } + + template + CUTLASS_HOST_DEVICE + static int + get_work_k_tile_count(WorkTileInfo const& work_tile_info, ProblemShape, TileShape) { + return work_tile_info.k_tile_count; + } + + CUTLASS_HOST_DEVICE + static uint32_t + get_work_k_tile_start(WorkTileInfo const& work_tile_info) { + return work_tile_info.K_idx; + } + + // Sets the current stream-K work to compute within work_tile_info. If new_unit is true, work_tile_info + // is populated as a new unit of work. Otherwise, state existing in work_tile_info (e.g., remaining + // iterations) is used to find the next tile in the current work unit. + CUTLASS_DEVICE + static void + set_stream_k_work( + Params const& params, + uint64_t linear_idx, + WorkTileInfo& work_tile_info, + bool new_unit) { + // In the CUTLASS 2.x implementation of stream K, stream-K work is assigned to each stream-K + // threadblock individually. For the most part, the set of K iterations corresponding to stream-K + // work was divided amongst stream-K threadblocks, and a threadblock determined which tile + // it would compute a (potentially-partial) output tile for based on the space of k iterations + // assigned to it. This often results in stream-K threadblocks processing tiles with different + // offsets in the K dimension from one another. This can reduce locality, but is lmitied to the + // (generally few) waves of threadblocks assigned to compute stream-K work. + // + // With the introduction of threadblock clusters, there is additional benefit to maintaining + // locality in the K dimension: shared portions of operands can be multicasted to threadblocks + // within a cluster. Thus, we would like to ensure that the assignment of stream-K work to + // threadblocks respects the ability to perform multicasting. + // + // To do so, we divide up the linearized stream-K units into clusters and share the same K + // offsets for work within clusters. + auto cluster_size = params.divmod_cluster_shape_major_.divisor * params.divmod_cluster_shape_minor_.divisor; + auto cluster_linear_work_idx = linear_idx / cluster_size; + + // Determine the starting k iteration computed by this stream-K work unit + uint32_t unit_iter_start = params.k_tiles_per_sk_unit_ * cluster_linear_work_idx; + + // Adjust the starting position and number of k iterations for "big units," which + // compute one extra iteration. These are the first big_units_ units in the + // linearized ID space. + bool is_big_unit = cluster_linear_work_idx < params.big_units_; + if (is_big_unit) { + // Since the "big units" are the first units in the linearized ID space, each + // of the units preceding this big unit computed one extra iteration. Thus, + // we must offset our start iteration by the number of units that precede + // the current unit in the linearized ID space. + unit_iter_start += cluster_linear_work_idx; + } else { + // Increment by one for each of the big clusters (since all big units precede this unit) + unit_iter_start += params.big_units_; + } + + uint32_t unit_iters; + if (new_unit) { + unit_iters = params.k_tiles_per_sk_unit_; + + // Only adjust iteration count for big unit if we are initializing this + // work unit. 
For existing work units, the extra iteration for big units + // has already been accounted for in k_tile_remaining + if (is_big_unit) { + ++unit_iters; + } + } + else { + unit_iters = work_tile_info.k_tile_remaining; + } + + // Find the output tile corresponding to the final k iteration covered by this + // work unit. Stream-K work units will work backwards in terms of the tiles they + // are responsible for computing. This is beneficial because the final (partial) + // tile computed by a stream-K block is typically the beginning of the output + // tile, while the beginning (partial) tile is typically the ending of another + // output tile. Since ending portions of an output tile must reduce across + // other work units computing portions of that output tile, it is preferable + // for them to be computed later, so as to reduce the likelihood of blocking + // on other work. + uint32_t unit_iter_end = unit_iter_start + unit_iters - 1; + uint32_t true_tile_id = unit_iter_end / params.k_tiles_per_output_tile_; + uint32_t true_tile_iter_start = true_tile_id * params.k_tiles_per_output_tile_; + uint32_t true_tile_iter_end = true_tile_iter_start + params.k_tiles_per_output_tile_; + + // Bring the linearized tile ID back into the space of tiles, rather than clusters + true_tile_id *= cluster_size; + + auto [cta_m_in_cluster, cta_n_in_cluster, _] = cute::block_id_in_cluster(); + + // The final linearized tile ID is in units of the cluster dimension over which we rasterize. + if (params.raster_order_ == RasterOrder::AlongN) { + true_tile_id += cta_n_in_cluster * params.divmod_cluster_shape_minor_.divisor; + } + else { + true_tile_id += cta_m_in_cluster * params.divmod_cluster_shape_minor_.divisor; + } + + // The unit's starting k iteration in the current tile is either the starting + // iteration for the tile as a whole, or the starting k iteration for the unit + // as a whole (if the latter is greater than the former). + uint32_t tile_iter_start = max(true_tile_iter_start, unit_iter_start); + + // Similarly, the unit's ending k iteration (exclusive) is either the end of + // the current tile it is assigned, or the ending iteration of the unit as a whole + // (if the latter is less than the former).
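// Illustrative walk-through (hypothetical numbers, not part of the upstream CUTLASS
// source): with k_tiles_per_sk_unit_ = 5, big_units_ = 2 and k_tiles_per_output_tile_ = 11,
// the stream-K units are assigned the global K-iteration ranges
//   unit 0: [0, 6)   unit 1: [6, 12)   unit 2: [12, 17)   unit 3: [17, 22)
// (units 0 and 1 are "big" and compute one extra iteration). Unit 1's last iteration, 11,
// falls in output tile 1, so its first pass computes only iteration [11, 12) of that tile;
// on the next pass the remaining range [6, 11) lands in tile 0 and, because it ends at
// tile 0's final K iteration, that pass performs the reduction and the epilogue for tile 0.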
+ uint32_t tile_iter_end = min(true_tile_iter_end, unit_iter_end + 1); + + uint32_t tile_iters = tile_iter_end - tile_iter_start; + + uint64_t work_idx_l, remainder; + params.divmod_batch_(work_idx_l, remainder, true_tile_id); + + uint64_t cta_per_grid_dim, dontcare; + params.divmod_cluster_shape_minor_(cta_per_grid_dim, dontcare, remainder); + + auto [work_idx_m, work_idx_n] = UnderlyingScheduler::get_work_idx_m_and_n( + cta_per_grid_dim, + params.divmod_cluster_shape_major_, + params.divmod_cluster_shape_minor_, + params.divmod_cluster_blk_major_, + params.log_swizzle_size_, + params.raster_order_); + + // + // Update the work_tile_info + // + + // Set the M, N, and L block offsets + work_tile_info.M_idx = work_idx_m; + work_tile_info.N_idx = work_idx_n; + work_tile_info.L_idx = static_cast(work_idx_l); + + // Set the k offset to be the starting k tile for this output tile + work_tile_info.K_idx = static_cast(tile_iter_start - true_tile_iter_start); + + // Set the split count to be the number of k tiles in the output tile + work_tile_info.splits = params.k_tiles_per_output_tile_; + + // Any checks for invalid work units should be done prior to this call + work_tile_info.is_valid_tile = true; + + work_tile_info.k_tile_count = tile_iters; + work_tile_info.k_tile_remaining = unit_iters; + + // Compute the epilogue if this unit of work contains the ending k iteration for + // the output tile in question + work_tile_info.is_final_split = (tile_iter_end == true_tile_iter_end); + } + + // Returns a WorkTileInfo to be computed for either the data-parallel or split-K + // work unit identified by the provided linear ID. + CUTLASS_DEVICE + static WorkTileInfo + set_non_stream_k_work(uint64_t linear_idx, Params const& params, bool is_split_k) { + + // The linearized ID space is in terms of work units, rather than tiles. However, + // to compute the correct block offset for a data-parallel tile, we must convert + // the current ID to the data-parallel tile it corresponds to. Each data-parallel + // unit maps to a single data-parallel tile, but each stream-K unit can map to more + // than one tile. Thus, we must offset the work-unit ID among the data-parallel units + // by the total number of output tiles that will be computed by stream-K units. + // + // The logic below also works for the split-K case, in which sk_units_ and sk_tiles_ + // are each 0. + uint64_t linear_work_idx = linear_idx - params.sk_units_ + params.sk_tiles_; + + // Map worker's linear index into the CTA-tiled problem shape to the corresponding MNL indices + uint64_t work_idx_l, remainder; + params.divmod_batch_(work_idx_l, remainder, linear_work_idx); + + uint64_t work_idx_k = 0; + if (is_split_k) { + params.divmod_k_(work_idx_k, remainder, remainder); + } + + uint64_t cta_per_grid_dim, dontcare; + params.divmod_cluster_shape_minor_(cta_per_grid_dim, dontcare, remainder); + + auto [work_idx_m, work_idx_n] = UnderlyingScheduler::get_work_idx_m_and_n( + cta_per_grid_dim, + params.divmod_cluster_shape_major_, + params.divmod_cluster_shape_minor_, + params.divmod_cluster_blk_major_, + params.log_swizzle_size_, + params.raster_order_); + + bool is_final_split = (work_idx_k == params.splits_ - 1); + + uint32_t k_tiles = params.k_tiles_per_output_tile_; + if (is_split_k) { + // Determine the number of iterations and starting iteration of this split. + // Doing so requires accounting for residual iterations, which are handled + // by the first big_units_ splits (with big_units_ = tiles % sm_count). + + // Offsets for "normal" units. 
No additional k iterations are performed, + // and big_units_ "big" units preceded us, each of which performed one + // additional iteration. Thus, we must increase our split starting offset + // by big_units_. + int additional_k_tiles = 0; + int split_start_offset = params.big_units_; + + if (work_idx_k < params.big_units_) { + // Offsets for "big" units. One additional k iteration is performed, + // and each split preceding us was a big unit, so we must increase + // our split starting offset by our split ID (work_idx_k). + additional_k_tiles = 1; + split_start_offset = work_idx_k; + } + + // Set up k iteration count and split starting iteration assuming the + // iteration space is evenly split. + k_tiles /= params.splits_; + work_idx_k *= k_tiles; + + // Apply any fixup needed to handle residuals + work_idx_k += split_start_offset; + k_tiles += additional_k_tiles; + } + + return { + work_idx_m, + work_idx_n, + static_cast(work_idx_k), + static_cast(work_idx_l), + true, + params.k_tiles_per_output_tile_, + k_tiles, + k_tiles, // remaining iterations + is_final_split + }; + } +}; + +} // namespace cutlass::gemm::kernel::detail diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/sparse_gemm.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/sparse_gemm.h new file mode 100644 index 0000000000000000000000000000000000000000..1964fba8bcec972474a6434cf1c09c648e8e29b6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/sparse_gemm.h @@ -0,0 +1,400 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Template for a pipelined GEMM kernel. 
Does not compute batching or support split-K. +*/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/semaphore.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate + typename Epilogue_, ///! Epilogue + typename ThreadblockSwizzle_, ///! Threadblock swizzling function + bool SplitKSerial ///! If true, code supporting split-K via serial reduction is enabled. +> +struct SparseGemm { + + using Mma = Mma_; + using Epilogue = Epilogue_; + using OutputOp = typename Epilogue::OutputOp; + using ThreadblockSwizzle = ThreadblockSwizzle_; + static bool const kSplitKSerial = SplitKSerial; + + static int const kSparse = Mma::kSparse; + static int const kMetaSizeInBits = Mma::kMetaSizeInBits; + static int const kMaxID2 = Mma::kMaxID2; + static int const kElementsPerElementE = Mma::kElementsPerElementE; + + using ElementE = typename Mma::ElementE; + using LayoutE = typename Mma::LayoutE; + + /// Warp count (concept: GemmShape) + using WarpCount = typename Mma::WarpCount; + static int const kThreadCount = 32 * WarpCount::kCount; + + /// Parameters structure + struct Params { + cutlass::gemm::GemmCoord problem_size; + cutlass::gemm::GemmCoord grid_tiled_shape; + int swizzle_log_tile; + typename Mma::IteratorA::Params params_A; + typename Mma::IteratorA::TensorRef ref_A; + typename Mma::IteratorB::Params params_B; + typename Mma::IteratorB::TensorRef ref_B; + typename Epilogue::OutputTileIterator::Params params_C; + typename Epilogue::OutputTileIterator::TensorRef ref_C; + typename Epilogue::OutputTileIterator::Params params_D; + typename Epilogue::OutputTileIterator::TensorRef ref_D; + typename Mma::IteratorE::Params params_E; + typename Mma::IteratorE::TensorRef ref_E; + typename OutputOp::Params output_op; + int *semaphore; + int gemm_k_iterations; + int gemm_k_size; + + // + // Methods + // + + CUTLASS_HOST_DEVICE + Params(): swizzle_log_tile(0), semaphore(0), gemm_k_iterations(0), gemm_k_size(0) { } + + CUTLASS_HOST_DEVICE + Params( + cutlass::gemm::GemmCoord const & problem_size, + cutlass::gemm::GemmCoord const & grid_tiled_shape, + typename Mma::IteratorA::TensorRef ref_A, + typename Mma::IteratorB::TensorRef ref_B, + typename Epilogue::OutputTileIterator::TensorRef ref_C, + typename Epilogue::OutputTileIterator::TensorRef ref_D, + typename Mma::IteratorE::TensorRef ref_E, + typename OutputOp::Params output_op = typename OutputOp::Params(), + int *workspace = nullptr + ): + problem_size(problem_size), + grid_tiled_shape(grid_tiled_shape), + swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)), + params_A(ref_A.layout()), + ref_A(ref_A), + params_B(ref_B.layout()), + ref_B(ref_B), + params_C(ref_C.layout()), + ref_C(ref_C), + params_D(ref_D.layout()), + ref_D(ref_D), + params_E(ref_E.layout()), + ref_E(ref_E), + output_op(output_op) { + + int total_gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK; + int gemm_k_iterations = (total_gemm_k_iterations + grid_tiled_shape.k() - 1) / grid_tiled_shape.k(); + + gemm_k_size = gemm_k_iterations * Mma::Shape::kK; + + semaphore = workspace; + } + }; + + /// Shared memory storage structure + union SharedStorage { + typename Mma::SharedStorage 
main_loop; + typename Epilogue::SharedStorage epilogue; + }; + + // + // Methods + // + + CUTLASS_HOST_DEVICE + SparseGemm() { } + + /// Determines whether kernel satisfies alignment + static Status can_implement( + cutlass::gemm::GemmCoord const & problem_size, + typename Mma::IteratorA::TensorRef ref_A, + typename Mma::IteratorB::TensorRef ref_B, + typename Epilogue::OutputTileIterator::TensorRef ref_C, + typename Epilogue::OutputTileIterator::TensorRef ref_D, + typename Mma::IteratorE::TensorRef ref_E) { + + static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + static int const kAlignmentE = Mma::IteratorE::AccessType::kElements; + + if (!TensorRef_aligned(ref_A, kAlignmentA)) { + return Status::kErrorMisalignedOperand; + } + + if (!TensorRef_aligned(ref_B, kAlignmentB)) { + return Status::kErrorMisalignedOperand; + } + + if (!TensorRef_aligned(ref_C, kAlignmentC)) { + return Status::kErrorMisalignedOperand; + } + + if (!TensorRef_aligned(ref_D, kAlignmentC)) { + return Status::kErrorMisalignedOperand; + } + + if (!TensorRef_aligned(ref_E, kAlignmentE)) { + return Status::kErrorMisalignedOperand; + } + + if ((problem_size.m() % kAlignmentA) || ((problem_size.k() / kSparse) % kAlignmentA) || + (problem_size.n() % kAlignmentB) || (problem_size.k() % kAlignmentB) || + (problem_size.m() % kAlignmentC) || (problem_size.n() % kAlignmentC) || + (problem_size.m() % kAlignmentE) || ((problem_size.k() / kSparse) % kAlignmentE)) { + + return Status::kErrorMisalignedOperand; + } + + // The k dimension has to be the multiple of the Threadblock k because out + // of bound meta data would be initialized to 0 by acync.zfill but 0 is not + // a valid meta data. + if (problem_size.k() % Mma::Shape::kK) { + return Status::kErrorMisalignedOperand; + } + + // M dimension has to be multiple of 32 (sparse float) or 16 (sparse int) + // because of the row reordering of operand E + static int const kAlignmentM = (sizeof(ElementE) == 2) ? 
32 : 16; + + if (problem_size.m() % kAlignmentM) { + return Status::kErrorMisalignedOperand; + } + + return Status::kSuccess; + } + + /// Executes one GEMM + CUTLASS_DEVICE + void operator()(Params const ¶ms, SharedStorage &shared_storage) { + + // Compute threadblock location + ThreadblockSwizzle threadblock_swizzle; + + cutlass::gemm::GemmCoord threadblock_tile_offset = + threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + // Early exit if CTA is out of range + if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() || + params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { + + return; + } + + // Compute initial location in logical coordinates + cutlass::MatrixCoord tb_offset_A{ + threadblock_tile_offset.m() * Mma::Shape::kM, + threadblock_tile_offset.k() * params.gemm_k_size / kSparse, + }; + + cutlass::MatrixCoord tb_offset_B{ + threadblock_tile_offset.k() * params.gemm_k_size, + threadblock_tile_offset.n() * Mma::Shape::kN + }; + + cutlass::MatrixCoord tb_offset_E{ + threadblock_tile_offset.m() * Mma::Shape::kM, + threadblock_tile_offset.k() * params.gemm_k_size / kSparse, + }; + + // Problem size is a function of threadblock index in the K dimension + int problem_size_k = min( + params.problem_size.k(), + (threadblock_tile_offset.k() + 1) * params.gemm_k_size); + + // Compute threadblock-scoped matrix multiply-add + int gemm_k_iterations = (problem_size_k - tb_offset_B.row() + Mma::Shape::kK - 1) / Mma::Shape::kK; + + // Compute position within threadblock + int thread_idx = threadIdx.x; + + // Construct iterators to A, B, and E operands + typename Mma::IteratorA iterator_A( + params.params_A, + params.ref_A.data(), + {params.problem_size.m(), problem_size_k / kSparse}, + thread_idx, + tb_offset_A); + + typename Mma::IteratorB iterator_B( + params.params_B, + params.ref_B.data(), + {problem_size_k, params.problem_size.n()}, + thread_idx, + tb_offset_B); + + typename Mma::IteratorE iterator_E( + params.params_E, params.ref_E.data(), + {params.problem_size.m(), + problem_size_k / kSparse / kElementsPerElementE}, + thread_idx, tb_offset_E); + + // Broadcast the warp_id computed by lane 0 to ensure dependent code + // is compiled as warp-uniform. + int warp_idx = canonical_warp_idx_sync(); + int lane_idx = threadIdx.x % 32; + + // + // Main loop + // + + // Construct thread-scoped matrix multiply + Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); + + typename Mma::FragmentC accumulators; + + accumulators.clear(); + + if (!kSplitKSerial || gemm_k_iterations > 0) { + // Compute threadblock-scoped matrix multiply-add + mma(gemm_k_iterations, accumulators, iterator_A, iterator_B, iterator_E, accumulators); + } + + // + // Epilogue + // + + OutputOp output_op(params.output_op); + + // + // Masked tile iterators constructed from members + // + + threadblock_tile_offset = + threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + //assume identity swizzle + MatrixCoord threadblock_offset( + threadblock_tile_offset.m() * Mma::Shape::kM, + threadblock_tile_offset.n() * Mma::Shape::kN + ); + + int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m(); + + // Construct the semaphore. + Semaphore semaphore(params.semaphore + block_idx, thread_idx); + + // If performing a reduction via split-K, fetch the initial synchronization + if (kSplitKSerial && params.grid_tiled_shape.k() > 1) { + + // Fetch the synchronization lock initially but do not block. 
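// Illustrative walk-through (not part of the upstream CUTLASS source): with
// grid_tiled_shape.k() == 3, the three k-partitions of an output tile serialize through
// this semaphore: partition 0 writes its partial result to D and releases the lock with
// value 1; partition 1 waits for 1, re-reads D as its source accumulator, adds its own
// partial, and releases 2; partition 2 waits for 2, writes the final output, and
// releases 0 so the semaphore is clean for a subsequent launch.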
+ semaphore.fetch(); + + // Indicate which position in a serial reduction the output operator is currently updating + output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k()); + } + + // Tile iterator loading from source tensor. + typename Epilogue::OutputTileIterator iterator_C( + params.params_C, + params.ref_C.data(), + params.problem_size.mn(), + thread_idx, + threadblock_offset + ); + + // Tile iterator writing to destination tensor. + typename Epilogue::OutputTileIterator iterator_D( + params.params_D, + params.ref_D.data(), + params.problem_size.mn(), + thread_idx, + threadblock_offset + ); + + Epilogue epilogue( + shared_storage.epilogue, + thread_idx, + warp_idx, + lane_idx); + + // Wait on the semaphore - this latency may have been covered by iterator construction + if (kSplitKSerial && params.grid_tiled_shape.k() > 1) { + + // For subsequent threadblocks, the source matrix is held in the 'D' tensor. + if (threadblock_tile_offset.k()) { + iterator_C = iterator_D; + } + + semaphore.wait(threadblock_tile_offset.k()); + + __threadfence(); + } + + // Execute the epilogue operator to update the destination tensor. + epilogue(output_op, iterator_D, accumulators, iterator_C); + + // + // Release the semaphore + // + + if (kSplitKSerial && params.grid_tiled_shape.k() > 1) { + + int lock = 0; + if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) { + + // The final threadblock resets the semaphore for subsequent grids. + lock = 0; + } + else { + // Otherwise, the semaphore is incremented + lock = threadblock_tile_offset.k() + 1; + } + + __threadfence(); + semaphore.release(lock); + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/sparse_gemm_row_broadcast.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/sparse_gemm_row_broadcast.h new file mode 100644 index 0000000000000000000000000000000000000000..9c94efde34e21e7605fcbe9f507924f944307e9b --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/sparse_gemm_row_broadcast.h @@ -0,0 +1,400 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Template for a pipelined GEMM kernel. Does not compute batching or support split-K. +*/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/semaphore.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate + typename Epilogue_, ///! Epilogue + typename ThreadblockSwizzle_, ///! Threadblock swizzling function + bool SplitKSerial ///! If true, code supporting split-K via serial reduction is enabled. +> +struct SparseGemmRowBroadcast { + + using Mma = Mma_; + using Epilogue = Epilogue_; + using OutputOp = typename Epilogue::OutputOp; + using ThreadblockSwizzle = ThreadblockSwizzle_; + static bool const kSplitKSerial = SplitKSerial; + + static int const kSparse = Mma::kSparse; + static int const kMetaSizeInBits = Mma::kMetaSizeInBits; + static int const kMaxID2 = Mma::kMaxID2; + static int const kElementsPerElementE = Mma::kElementsPerElementE; + + using ElementE = typename Mma::ElementE; + using LayoutE = typename Mma::LayoutE; + + /// Warp count (concept: GemmShape) + using WarpCount = typename Mma::WarpCount; + static int const kThreadCount = 32 * WarpCount::kCount; + + /// Parameters structure + struct Params { + cutlass::gemm::GemmCoord problem_size; + cutlass::gemm::GemmCoord grid_tiled_shape; + int swizzle_log_tile; + typename Mma::IteratorA::Params params_A; + typename Mma::IteratorA::TensorRef ref_A; + typename Mma::IteratorB::Params params_B; + typename Mma::IteratorB::TensorRef ref_B; + typename Epilogue::OutputTileIterator::Params params_C; + typename Epilogue::OutputTileIterator::TensorRef ref_C; + typename Epilogue::OutputTileIterator::Params params_D; + typename Epilogue::OutputTileIterator::TensorRef ref_D; + typename Mma::IteratorE::Params params_E; + typename Mma::IteratorE::TensorRef ref_E; + typename OutputOp::Params output_op; + int *semaphore; + int gemm_k_iterations; + int gemm_k_size; + + // + // Methods + // + + CUTLASS_HOST_DEVICE + Params(): swizzle_log_tile(0), semaphore(0), gemm_k_iterations(0), gemm_k_size(0) { } + + CUTLASS_HOST_DEVICE + Params( + cutlass::gemm::GemmCoord const & problem_size, + cutlass::gemm::GemmCoord const & grid_tiled_shape, + typename Mma::IteratorA::TensorRef ref_A, + typename Mma::IteratorB::TensorRef ref_B, + typename Epilogue::OutputTileIterator::TensorRef ref_C, + typename Epilogue::OutputTileIterator::TensorRef ref_D, + typename Mma::IteratorE::TensorRef ref_E, + typename OutputOp::Params output_op = typename OutputOp::Params(), + int *workspace = nullptr + ): + problem_size(problem_size), + 
grid_tiled_shape(grid_tiled_shape), + swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)), + params_A(ref_A.layout()), + ref_A(ref_A), + params_B(ref_B.layout()), + ref_B(ref_B), + params_C(ref_C.layout()), + ref_C(ref_C), + params_D(ref_D.layout()), + ref_D(ref_D), + params_E(ref_E.layout()), + ref_E(ref_E), + output_op(output_op) { + + int total_gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK; + int gemm_k_iterations = (total_gemm_k_iterations + grid_tiled_shape.k() - 1) / grid_tiled_shape.k(); + + gemm_k_size = gemm_k_iterations * Mma::Shape::kK; + + semaphore = workspace; + } + }; + + /// Shared memory storage structure + union SharedStorage { + typename Mma::SharedStorage main_loop; + typename Epilogue::SharedStorage epilogue; + }; + + // + // Methods + // + + CUTLASS_HOST_DEVICE + SparseGemmRowBroadcast() { } + + /// Determines whether kernel satisfies alignment + static Status can_implement( + cutlass::gemm::GemmCoord const & problem_size, + typename Mma::IteratorA::TensorRef ref_A, + typename Mma::IteratorB::TensorRef ref_B, + typename Epilogue::OutputTileIterator::TensorRef ref_C, + typename Epilogue::OutputTileIterator::TensorRef ref_D, + typename Mma::IteratorE::TensorRef ref_E) { + + static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + static int const kAlignmentE = Mma::IteratorE::AccessType::kElements; + + if (!TensorRef_aligned(ref_A, kAlignmentA)) { + return Status::kErrorMisalignedOperand; + } + + if (!TensorRef_aligned(ref_B, kAlignmentB)) { + return Status::kErrorMisalignedOperand; + } + + // if (!TensorRef_aligned(ref_C, kAlignmentC)) { + // return Status::kErrorMisalignedOperand; + // } + + if (!TensorRef_aligned(ref_D, kAlignmentC)) { + return Status::kErrorMisalignedOperand; + } + + if (!TensorRef_aligned(ref_E, kAlignmentE)) { + return Status::kErrorMisalignedOperand; + } + + if ((problem_size.m() % kAlignmentA) || ((problem_size.k() / kSparse) % kAlignmentA) || + (problem_size.n() % kAlignmentB) || (problem_size.k() % kAlignmentB) || + (problem_size.m() % kAlignmentC) || (problem_size.n() % kAlignmentC) || + (problem_size.m() % kAlignmentE) || ((problem_size.k() / kSparse) % kAlignmentE)) { + + return Status::kErrorMisalignedOperand; + } + + // The k dimension has to be the multiple of the Threadblock k because out + // of bound meta data would be initialized to 0 by acync.zfill but 0 is not + // a valid meta data. + if (problem_size.k() % Mma::Shape::kK) { + return Status::kErrorMisalignedOperand; + } + + // M dimension has to be multiple of 32 (sparse float) or 16 (sparse int) + // because of the row reordering of operand E + static int const kAlignmentM = (sizeof(ElementE) == 2) ? 
32 : 16; + + if (problem_size.m() % kAlignmentM) { + return Status::kErrorMisalignedOperand; + } + + return Status::kSuccess; + } + + /// Executes one GEMM + CUTLASS_DEVICE + void operator()(Params const ¶ms, SharedStorage &shared_storage) { + + // Compute threadblock location + ThreadblockSwizzle threadblock_swizzle; + + cutlass::gemm::GemmCoord threadblock_tile_offset = + threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + // Early exit if CTA is out of range + if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() || + params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { + + return; + } + + // Compute initial location in logical coordinates + cutlass::MatrixCoord tb_offset_A{ + threadblock_tile_offset.m() * Mma::Shape::kM, + threadblock_tile_offset.k() * params.gemm_k_size / kSparse, + }; + + cutlass::MatrixCoord tb_offset_B{ + threadblock_tile_offset.k() * params.gemm_k_size, + threadblock_tile_offset.n() * Mma::Shape::kN + }; + + cutlass::MatrixCoord tb_offset_E{ + threadblock_tile_offset.m() * Mma::Shape::kM, + threadblock_tile_offset.k() * params.gemm_k_size / kSparse, + }; + + // Problem size is a function of threadblock index in the K dimension + int problem_size_k = min( + params.problem_size.k(), + (threadblock_tile_offset.k() + 1) * params.gemm_k_size); + + // Compute threadblock-scoped matrix multiply-add + int gemm_k_iterations = (problem_size_k - tb_offset_B.row() + Mma::Shape::kK - 1) / Mma::Shape::kK; + + // Compute position within threadblock + int thread_idx = threadIdx.x; + + // Construct iterators to A, B, and E operands + typename Mma::IteratorA iterator_A( + params.params_A, + params.ref_A.data(), + {params.problem_size.m(), problem_size_k / kSparse}, + thread_idx, + tb_offset_A); + + typename Mma::IteratorB iterator_B( + params.params_B, + params.ref_B.data(), + {problem_size_k, params.problem_size.n()}, + thread_idx, + tb_offset_B); + + typename Mma::IteratorE iterator_E( + params.params_E, params.ref_E.data(), + {params.problem_size.m(), + problem_size_k / kSparse / kElementsPerElementE}, + thread_idx, tb_offset_E); + + // Broadcast the warp_id computed by lane 0 to ensure dependent code + // is compiled as warp-uniform. + int warp_idx = canonical_warp_idx(); + int lane_idx = threadIdx.x % 32; + + // + // Main loop + // + + // Construct thread-scoped matrix multiply + Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); + + typename Mma::FragmentC accumulators; + + accumulators.clear(); + + if (!kSplitKSerial || gemm_k_iterations > 0) { + // Compute threadblock-scoped matrix multiply-add + mma(gemm_k_iterations, accumulators, iterator_A, iterator_B, iterator_E, accumulators); + } + + // + // Epilogue + // + + OutputOp output_op(params.output_op); + + // + // Masked tile iterators constructed from members + // + + threadblock_tile_offset = + threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + //assume identity swizzle + MatrixCoord threadblock_offset( + threadblock_tile_offset.m() * Mma::Shape::kM, + threadblock_tile_offset.n() * Mma::Shape::kN + ); + + int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m(); + + // Construct the semaphore. + Semaphore semaphore(params.semaphore + block_idx, thread_idx); + + // If performing a reduction via split-K, fetch the initial synchronization + if (kSplitKSerial && params.grid_tiled_shape.k() > 1) { + + // Fetch the synchronization lock initially but do not block. 
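// Illustrative numbers (hypothetical, not part of the upstream CUTLASS source): for
// K = 1024 and Mma::Shape::kK = 64 there are 16 K iterations in total; with
// grid_tiled_shape.k() = 4 the Params constructor sets gemm_k_size = 4 * 64 = 256, so
// partition p covers the K range [256*p, 256*(p+1)) and the semaphore below serializes
// the four partial sums into D in partition order.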
+ semaphore.fetch(); + + // Indicate which position in a serial reduction the output operator is currently updating + output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k()); + } + + // Tile iterator loading from source tensor. + typename Epilogue::OutputTileIterator iterator_C( + params.params_C, + params.ref_C.data(), + params.problem_size.mn(), + thread_idx, + threadblock_offset + ); + + // Tile iterator writing to destination tensor. + typename Epilogue::OutputTileIterator iterator_D( + params.params_D, + params.ref_D.data(), + params.problem_size.mn(), + thread_idx, + threadblock_offset + ); + + Epilogue epilogue( + shared_storage.epilogue, + thread_idx, + warp_idx, + lane_idx); + + // Wait on the semaphore - this latency may have been covered by iterator construction + if (kSplitKSerial && params.grid_tiled_shape.k() > 1) { + + // For subsequent threadblocks, the source matrix is held in the 'D' tensor. + if (threadblock_tile_offset.k()) { + iterator_C = iterator_D; + } + + semaphore.wait(threadblock_tile_offset.k()); + + __threadfence(); + } + + // Execute the epilogue operator to update the destination tensor. + epilogue(output_op, iterator_D, accumulators, iterator_C); + + // + // Release the semaphore + // + + if (kSplitKSerial && params.grid_tiled_shape.k() > 1) { + + int lock = 0; + if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) { + + // The final threadblock resets the semaphore for subsequent grids. + lock = 0; + } + else { + // Otherwise, the semaphore is incremented + lock = threadblock_tile_offset.k() + 1; + } + + __threadfence(); + semaphore.release(lock); + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/symm_universal.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/symm_universal.h new file mode 100644 index 0000000000000000000000000000000000000000..f05cf7df9b97ffba188362377c192b5f6355862e --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/symm_universal.h @@ -0,0 +1,698 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief + +*/ + +#pragma once + +#include "cutlass/blas3.h" +#include "cutlass/fast_math.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/complex.h" +#include "cutlass/semaphore.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename Mma1_, ///! Threadblock-scoped triangular matrix multiply-accumulate (A*B or B*A) + typename Mma2_, ///! Threadblock-scoped triangular matrix multiply-accumulate (AT*B or B*AT) + typename Epilogue_, ///! Epilogue + typename ThreadblockSwizzle_, ///! Threadblock swizzling function + SideMode SideMode_, ///! Side Mode for the kernel (kLeft or kRight) + FillMode FillMode_ ///! Fill Mode for triangular matrix (kLower or kUpper) +> +struct SymmUniversal { +public: + + using Mma1 = Mma1_; + using Mma2 = Mma2_; + using Epilogue = Epilogue_; + using EpilogueOutputOp = typename Epilogue::OutputOp; + using ThreadblockSwizzle = ThreadblockSwizzle_; + + using ElementA = typename Mma1::IteratorA::Element; + using ElementB = typename Mma1::IteratorB::Element; + + // Mma1 (TRMM - with diagonal: C_tmp = alpha * A * B) + using LayoutA = typename Mma1::IteratorA::Layout; + using LayoutBT = typename Mma1::IteratorB::Layout; + static ComplexTransform const kMma1TransformA = Mma1::kTransformA; + static ComplexTransform const kMma1TransformB = Mma1::kTransformB; + + // Mma2 (TRMM - withOUT diagonal: alpha * AT * B) + using LayoutB = typename Mma2::IteratorA::Layout; + using LayoutAT = typename Mma2::IteratorB::Layout; + static ComplexTransform const kMma2TransformA = Mma2::kTransformA; + static ComplexTransform const kMma2TransformB = Mma2::kTransformB; + + // Common type definitions for Mma1 and Mma2 + using Operator = typename Mma1::Operator; + using OperatorClass = typename Mma1::Operator::OperatorClass; + using ThreadblockShape = typename Mma1::Shape; + using WarpShape = typename Mma1::Operator::Shape; + using InstructionShape = typename Mma1::Policy::Operator::InstructionShape; + using ArchTag = typename Mma1::ArchTag; + + static int const kStages = Mma1::kStages; + static int const kAlignmentA = Mma1::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma1::IteratorB::AccessType::kElements; + + // Output related typedefinitions + using ElementC = typename Epilogue::OutputTileIterator::Element; + using LayoutC = typename Epilogue::OutputTileIterator::Layout; + static SideMode const kSideModeA = SideMode_; + static FillMode const kFillModeA = FillMode_; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + + + /// Warp count (concept: GemmShape) + using WarpCount = typename Mma1::WarpCount; + static int const 
kThreadCount = 32 * WarpCount::kCount; + + + // + // Structures + // + + /// Argument structure + struct Arguments { + + // + // Data members + // + + GemmUniversalMode mode; + GemmCoord problem_size; + int batch_count; + + typename EpilogueOutputOp::Params epilogue; + + void const * ptr_A; + void const * ptr_B; + void const * ptr_C; + void * ptr_D; + + int64_t batch_stride_A; + int64_t batch_stride_B; + int64_t batch_stride_C; + int64_t batch_stride_D; + + typename LayoutA::Stride::Index lda; + typename LayoutB::Stride::Index ldb; + typename LayoutC::Stride::Index ldc; + typename LayoutC::Stride::Index ldd; + + // + // Methods + // + + Arguments(): + mode(GemmUniversalMode::kGemm), + batch_count(1), + ptr_A(nullptr), ptr_B(nullptr), ptr_C(nullptr), ptr_D(nullptr) { } + + /// constructs an arguments structure + Arguments( + GemmUniversalMode mode, + GemmCoord problem_size, + int batch_count, + typename EpilogueOutputOp::Params epilogue, + void const * ptr_A, + void const * ptr_B, + void const * ptr_C, + void * ptr_D, + int64_t batch_stride_A, + int64_t batch_stride_B, + int64_t batch_stride_C, + int64_t batch_stride_D, + typename LayoutA::Stride::Index lda, + typename LayoutB::Stride::Index ldb, + typename LayoutC::Stride::Index ldc, + typename LayoutC::Stride::Index ldd + ): + mode(mode), + problem_size(problem_size), + batch_count(batch_count), + epilogue(epilogue), + ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D), + batch_stride_A(batch_stride_A), batch_stride_C(batch_stride_C), batch_stride_D(batch_stride_D), + lda(lda), ldb(ldb), ldc(ldc), ldd(ldd) { + + } + + /// Returns arguments for the transposed problem sizes + Arguments transposed_problem_size() const { + Arguments args(*this); + + std::swap(args.problem_size.m(), args.problem_size.n()); + + return args; + } + + /// Returns arguments for the transposed matrices + Arguments swapped_matrices() const { + Arguments args(*this); + + std::swap(args.ptr_A, args.ptr_B); + std::swap(args.lda, args.ldb); + std::swap(args.batch_stride_A, args.batch_stride_B); + + return args; + } + }; + + // + // Structure for precomputing values in host memory and passing to kernels + // + + /// Parameters structure + struct Params { + + cutlass::gemm::GemmCoord problem_size; + cutlass::gemm::GemmCoord grid_tiled_shape; + int swizzle_log_tile; + + // Mma1 Iterator A and B params + typename Mma1::IteratorA::Params params_A_mma1; + typename Mma1::IteratorB::Params params_B_mma1; + + // Mma2 Iterator A and B params + typename Mma2::IteratorA::Params params_A_mma2; + typename Mma2::IteratorB::Params params_B_mma2; + + typename Epilogue::OutputTileIterator::Params params_C; + typename Epilogue::OutputTileIterator::Params params_D; + + typename EpilogueOutputOp::Params output_op; + + GemmUniversalMode mode; + int batch_count; + int gemm_k_size; + + void * ptr_A; + void * ptr_B; + void * ptr_C; + void * ptr_D; + + int64_t batch_stride_A; + int64_t batch_stride_B; + int64_t batch_stride_C; + int64_t batch_stride_D; + + int *semaphore; + + // + // Methods + // + + CUTLASS_HOST_DEVICE + Params(): + swizzle_log_tile(0), + params_A_mma1(0), + params_B_mma1(0), + params_A_mma2(0), + params_B_mma2(0), + params_C(0), + params_D(0), + batch_count(0), + gemm_k_size(0), + mode(cutlass::gemm::GemmUniversalMode::kGemm), + ptr_A(nullptr), + ptr_B(nullptr), + ptr_C(nullptr), + ptr_D(nullptr), + batch_stride_A(0), + batch_stride_B(0), + batch_stride_C(0), + batch_stride_D(0), + semaphore(nullptr) { } + + CUTLASS_HOST_DEVICE + Params( + Arguments const &args, + 
cutlass::gemm::GemmCoord const & grid_tiled_shape, + int gemm_k_size, + void *workspace = nullptr + ): + problem_size(args.problem_size), + grid_tiled_shape(grid_tiled_shape), + swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)), + params_A_mma1(args.lda), + params_B_mma1(args.ldb), + params_A_mma2(args.lda), + params_B_mma2(args.ldb), + params_C(args.ldc), + params_D(args.ldd), + output_op(args.epilogue), + mode(args.mode), + batch_count(args.batch_count), + gemm_k_size(gemm_k_size), + ptr_A(const_cast(args.ptr_A)), + ptr_B(const_cast(args.ptr_B)), + ptr_C(const_cast(args.ptr_C)), + ptr_D(const_cast(args.ptr_D)), + batch_stride_A(args.batch_stride_A), + batch_stride_B(args.batch_stride_B), + batch_stride_C(args.batch_stride_C), + batch_stride_D(args.batch_stride_D), + semaphore(static_cast(workspace)) { + } + + CUTLASS_HOST_DEVICE + void update( + Arguments const &args, + void *workspace = nullptr) { + + ptr_A = const_cast(args.ptr_A); + ptr_B = const_cast(args.ptr_B); + ptr_C = const_cast(args.ptr_C); + ptr_D = args.ptr_D; + + output_op = args.epilogue; + + semaphore = static_cast(workspace); + } + + }; + + /// Shared memory storage structure + union SharedStorage { + typename Mma1::SharedStorage mma1_main_loop; + typename Mma2::SharedStorage mma2_main_loop; + typename Epilogue::SharedStorage epilogue; + }; + +public: + + // + // Methods + // + + CUTLASS_DEVICE + SymmUniversal() { } + + /// Determines whether kernel satisfies alignment + static Status can_implement( + cutlass::gemm::GemmCoord const & problem_size) { + + static int const kAlignmentA = Mma1::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma1::IteratorB::AccessType::kElements; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + + if ((problem_size.m() % kAlignmentA) || (problem_size.k() % kAlignmentA) || + (problem_size.n() % kAlignmentB) || (problem_size.k() % kAlignmentB) || + (problem_size.m() % kAlignmentC) || (problem_size.n() % kAlignmentC)) { + + return Status::kErrorMisalignedOperand; + } + + return Status::kSuccess; + } + + static Status can_implement(Arguments const &args) { + return can_implement(args.problem_size); + } + + /// Executes two GEMM + CUTLASS_DEVICE + void operator()(Params const ¶ms, SharedStorage &shared_storage) { + + // Compute threadblock location + ThreadblockSwizzle threadblock_swizzle; + + cutlass::gemm::GemmCoord threadblock_tile_offset = + threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + // Early exit if CTA is out of range + if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() || + params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { + return; + } + + int offset_k = 0; + int problem_size_k = params.problem_size.k(); + + ElementA *ptr_A = static_cast(params.ptr_A); + ElementB *ptr_B = static_cast(params.ptr_B); + + // + // Fetch pointers based on mode. 
+ // + if (params.mode == GemmUniversalMode::kGemm || + params.mode == GemmUniversalMode::kGemmSplitKParallel) { + + if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) { + + problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size; + } + + offset_k = threadblock_tile_offset.k() * params.gemm_k_size; + } + + __syncthreads(); + + // Compute initial location in logical coordinates + cutlass::MatrixCoord tb_offset_MxK_mma1{ + threadblock_tile_offset.m() * Mma1::Shape::kM, + offset_k, + }; + + cutlass::MatrixCoord tb_offset_KxN_mma1{ + offset_k, + threadblock_tile_offset.n() * Mma1::Shape::kN + }; + + cutlass::MatrixCoord tb_offset_MxK_mma2{ + threadblock_tile_offset.m() * Mma1::Shape::kM, + offset_k, + }; + + cutlass::MatrixCoord tb_offset_KxN_mma2{ + offset_k, + threadblock_tile_offset.n() * Mma1::Shape::kN + }; + + // Compute position within threadblock + int thread_idx = threadIdx.x; + + // Broadcast the warp_id computed by lane 0 to ensure dependent code + // is compiled as warp-uniform. + int warp_idx = canonical_warp_idx_sync(); + + int lane_idx = threadIdx.x % 32; + + // + // Main loop + // + + // Construct thread-scoped matrix multiply for Mma1 + Mma1 mma1(shared_storage.mma1_main_loop, thread_idx, warp_idx, lane_idx); + + // Construct thread-scoped matrix multiply for Mma2 + Mma2 mma2(shared_storage.mma2_main_loop, thread_idx, warp_idx, lane_idx); + + typename Mma1::FragmentC accumulators; + + accumulators.clear(); + + // Compute threadblock-scoped matrix multiply-add + int gemm_k_iterations = (problem_size_k - offset_k + Mma1::Shape::kK - 1) / Mma1::Shape::kK; + int gemm_k_iterations_mma1 = gemm_k_iterations; + int gemm_k_iterations_mma2 = gemm_k_iterations; + + + /****************************************************************************************************** + * SYMM (Side Mode, Fill Mode) is made of two TRMMs: + First TRMM (Mma1: Side Mode, Fill Mode, Non-Unit Diag): (A * B) or (B * A) + Second TRMM (Mma2: Side Mode, Inverted Fill Mode, Unit Diag): (AT * B) or (B * AT) + + * For the first TRMM (Mma1) of SYMM, the following method is used to calculate the k-iterations: + First two cases: (Left Side, Lower Fill) and (Right Side, Upper Fill) are transpose of each other + - (Left Side, Lower Fill): calculate bottom of the CTA tile, then find the k-iterations + needed to process all elements till that coordinate. + - (Right Side, Upper Fill): calculate right end of the CTA tile, then find the k-iterations + needed to process all elements till that coordinate. + + Last two cases: (Left Side, Upper Fill) and (Right Side, Lower Fill) are transpose of each other + - (Left Side, Upper Fill): calculate the top of the CTA tile, then find k-iterations + that can be skipped for all elements of this tile. + - (Right Side, Lower Fill): calculate the left start of the CTA tile, then find k-iterations + that can be skipped for all elements of this tile. + + * For the second TRMM (Mma2) of SYMM, the k-iterations and threadblock offsets are calculated + the same way as the first TRMM (Mma1) of same side mode but with inverted fill mode. + For example, if the first TRMM is left sided with lower fill, the second TRMM would be + left sided with upper fill. 
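      * As an illustration of the k-iteration arithmetic implemented below (the tile sizes
        here are hypothetical, not mandated by this file): assume Mma1::Shape::kM = 128,
        Mma1::Shape::kK = 32, a problem with 16 k-tiles, and threadblock_tile_offset.m() = 2
        in the (Left Side, Lower Fill) case.
          - Mma1: k_iterations_till_diagonal_mma1 = ((2 + 1) * 128 + 32 - 1) / 32 = 12,
            so Mma1 runs only 12 of the 16 k-iterations for this CTA.
          - Mma2: k_iterations_till_diagonal_mma2 = (2 * 128) / 32 = 8, so Mma2 advances its
            k offsets by 8 * 32 = 256 and runs the remaining 16 - 8 = 8 iterations.
        The (Right Side, Upper Fill) case uses the same arithmetic with n() in place of m().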
+ ********************************************************************************************************/ + + if (kSideModeA == SideMode::kLeft && kFillModeA == FillMode::kLower) { + + int k_iterations_till_diagonal_mma1 = ((threadblock_tile_offset.m() + 1) * Mma1::Shape::kM + Mma1::Shape::kK - 1) / Mma1::Shape::kK; + if (k_iterations_till_diagonal_mma1 < gemm_k_iterations) { + gemm_k_iterations_mma1 = k_iterations_till_diagonal_mma1; + } + + int k_iterations_till_diagonal_mma2 = ((threadblock_tile_offset.m()) * Mma1::Shape::kM) / Mma1::Shape::kK; + if (k_iterations_till_diagonal_mma2 != 0) { + tb_offset_MxK_mma2 += cutlass::MatrixCoord({0, k_iterations_till_diagonal_mma2 * Mma1::Shape::kK}); + tb_offset_KxN_mma2 += cutlass::MatrixCoord({k_iterations_till_diagonal_mma2 * Mma1::Shape::kK, 0}); + gemm_k_iterations_mma2 -= k_iterations_till_diagonal_mma2; + } + + } else if (kSideModeA == SideMode::kRight && kFillModeA == FillMode::kUpper) { + + int k_iterations_till_diagonal_mma1 = ((threadblock_tile_offset.n() + 1) * Mma1::Shape::kN + Mma1::Shape::kK - 1) / Mma1::Shape::kK; + if (k_iterations_till_diagonal_mma1 < gemm_k_iterations) { + gemm_k_iterations_mma1 = k_iterations_till_diagonal_mma1; + } + + int k_iterations_till_diagonal_mma2 = ((threadblock_tile_offset.n()) * Mma1::Shape::kN) / Mma1::Shape::kK; + if (k_iterations_till_diagonal_mma2 != 0) { + tb_offset_MxK_mma2 += cutlass::MatrixCoord({0, k_iterations_till_diagonal_mma2 * Mma1::Shape::kK}); + tb_offset_KxN_mma2 += cutlass::MatrixCoord({k_iterations_till_diagonal_mma2 * Mma1::Shape::kK, 0}); + gemm_k_iterations_mma2 -= k_iterations_till_diagonal_mma2; + } + + } else if (kSideModeA == SideMode::kLeft && kFillModeA == FillMode::kUpper) { + + int k_iterations_till_diagonal_mma1 = ((threadblock_tile_offset.m()) * Mma1::Shape::kM) / Mma1::Shape::kK; + if (k_iterations_till_diagonal_mma1 != 0) { + tb_offset_MxK_mma1 += cutlass::MatrixCoord({0, k_iterations_till_diagonal_mma1 * Mma1::Shape::kK}); + tb_offset_KxN_mma1 += cutlass::MatrixCoord({k_iterations_till_diagonal_mma1 * Mma1::Shape::kK, 0}); + gemm_k_iterations_mma1 -= k_iterations_till_diagonal_mma1; + } + + int k_iterations_till_diagonal_mma2 = ((threadblock_tile_offset.m() + 1) * Mma1::Shape::kM + Mma1::Shape::kK - 1) / Mma1::Shape::kK; + if (k_iterations_till_diagonal_mma2 < gemm_k_iterations) { + gemm_k_iterations_mma2 = k_iterations_till_diagonal_mma2; + } + + } else if (kSideModeA == SideMode::kRight && kFillModeA == FillMode::kLower) { + + int k_iterations_till_diagonal_mma1 = ((threadblock_tile_offset.n()) * Mma1::Shape::kN) / Mma1::Shape::kK; + + if (k_iterations_till_diagonal_mma1 != 0) { + tb_offset_MxK_mma1 += cutlass::MatrixCoord({0, k_iterations_till_diagonal_mma1 * Mma1::Shape::kK}); + tb_offset_KxN_mma1 += cutlass::MatrixCoord({k_iterations_till_diagonal_mma1 * Mma1::Shape::kK, 0}); + gemm_k_iterations_mma1 -= k_iterations_till_diagonal_mma1; + } + + int k_iterations_till_diagonal_mma2 = ((threadblock_tile_offset.n() + 1) * Mma1::Shape::kN + Mma1::Shape::kK - 1) / Mma1::Shape::kK; + if (k_iterations_till_diagonal_mma2 < gemm_k_iterations) { + gemm_k_iterations_mma2 = k_iterations_till_diagonal_mma2; + } + + } + + // Construct iterators to A and B operands for Mma1 + typename Mma1::IteratorA iterator_A_mma1( + params.params_A_mma1, + ptr_A, + {params.problem_size.m(), problem_size_k}, + thread_idx, + tb_offset_MxK_mma1); + + typename Mma1::IteratorB iterator_B_mma1( + params.params_B_mma1, + ptr_B, + {problem_size_k, params.problem_size.n()}, + thread_idx, + 
tb_offset_KxN_mma1); + + // Construct iterators to A and B operands for Mma2 + typename Mma2::IteratorA iterator_A_mma2( + params.params_A_mma2, + ptr_A, + {params.problem_size.m(), problem_size_k}, + thread_idx, + tb_offset_MxK_mma2); + + typename Mma2::IteratorB iterator_B_mma2( + params.params_B_mma2, + ptr_B, + {problem_size_k, params.problem_size.n()}, + thread_idx, + tb_offset_KxN_mma2); + + // Compute threadblock-scoped matrix multiply-add (A x B) or (B x A) + mma1( + gemm_k_iterations_mma1, + accumulators, + iterator_A_mma1, + iterator_B_mma1, + accumulators); + + // Compute threadblock-scoped matrix multiply-add (AT x B) or (B x AT) + mma2( + gemm_k_iterations_mma2, + accumulators, + iterator_A_mma2, + iterator_B_mma2, + accumulators); + + // + // Epilogue + // + + EpilogueOutputOp output_op(params.output_op); + + // + // Masked tile iterators constructed from members + // + + threadblock_tile_offset = + threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + //assume identity swizzle + MatrixCoord threadblock_offset( + threadblock_tile_offset.m() * Mma1::Shape::kM, + threadblock_tile_offset.n() * Mma1::Shape::kN + ); + + int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m(); + + ElementC *ptr_C = static_cast(params.ptr_C); + ElementC *ptr_D = static_cast(params.ptr_D); + + // + // Fetch pointers based on mode. + // + + // Construct the semaphore. + Semaphore semaphore(params.semaphore + block_idx, thread_idx); + + if (params.mode == GemmUniversalMode::kGemm) { + + // If performing a reduction via split-K, fetch the initial synchronization + if (params.grid_tiled_shape.k() > 1) { + + // Fetch the synchronization lock initially but do not block. + semaphore.fetch(); + + // Indicate which position in a serial reduction the output operator is currently updating + output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k()); + } + } + else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) { + ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; + } + else if (params.mode == GemmUniversalMode::kBatched) { + ptr_C += threadblock_tile_offset.k() * params.batch_stride_C; + ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; + } + else if (params.mode == GemmUniversalMode::kArray) { + ptr_C = static_cast(params.ptr_C)[threadblock_tile_offset.k()]; + ptr_D = static_cast(params.ptr_D)[threadblock_tile_offset.k()]; + } + + // Tile iterator loading from source tensor. + typename Epilogue::OutputTileIterator iterator_C( + params.params_C, + ptr_C, + params.problem_size.mn(), + thread_idx, + threadblock_offset + ); + + // Tile iterator writing to destination tensor. + typename Epilogue::OutputTileIterator iterator_D( + params.params_D, + ptr_D, + params.problem_size.mn(), + thread_idx, + threadblock_offset + ); + + Epilogue epilogue( + shared_storage.epilogue, + thread_idx, + warp_idx, + lane_idx); + + // Wait on the semaphore - this latency may have been covered by iterator construction + if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { + + // For subsequent threadblocks, the source matrix is held in the 'D' tensor. + if (threadblock_tile_offset.k()) { + iterator_C = iterator_D; + } + + semaphore.wait(threadblock_tile_offset.k()); + + __threadfence(); + } + + // Execute the epilogue operator to update the destination tensor. 
+ epilogue( + output_op, + iterator_D, + accumulators, + iterator_C); + + // + // Release the semaphore + // + + if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { + + int lock = 0; + if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) { + + // The final threadblock resets the semaphore for subsequent grids. + lock = 0; + } + else { + // Otherwise, the semaphore is incremented + lock = threadblock_tile_offset.k() + 1; + } + + semaphore.release(lock); + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/tile_scheduler.hpp b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/tile_scheduler.hpp new file mode 100644 index 0000000000000000000000000000000000000000..a81460e4f6f4e3b79af500b03d400c3fc4d5e0e4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/tile_scheduler.hpp @@ -0,0 +1,129 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +#pragma once + +/*! 
\file + \brief Utilities for selecting default tile schedulers +*/ + +#include "cutlass/detail/dependent_false.hpp" +#include "cutlass/gemm/kernel/sm90_tile_scheduler.hpp" +#include "cutlass/gemm/kernel/sm90_tile_scheduler_stream_k.hpp" +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass::gemm { + +//////////////////////////////////////////////////////////////////////////////// + +// +// Tags for specifying tile schedulers +// + +struct PersistentScheduler { }; + +struct StreamKScheduler { }; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace cutlass::gemm + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass::gemm::kernel::detail { + +// +// Selectors mapping tile scheduler tag and arch tag to a tile scheduler class +// + +template < + class TileSchedulerTag, + class ArchTag, + class TileShape, + class ClusterShape +> +struct TileSchedulerSelector { + static_assert(cutlass::detail::dependent_false, + "Could not select a tile scheduler for given parameters."); +}; + +template < + class ArchTag, + class TileShape, + class ClusterShape +> +struct TileSchedulerSelector< + PersistentScheduler, + ArchTag, + TileShape, + ClusterShape + > { + using Scheduler = PersistentTileSchedulerSm90; +}; + +// Default (void) for Sm90 maps to PersistentTileSchedulerSm90 +template < + class ArchTag, + class TileShape, + class ClusterShape +> +struct TileSchedulerSelector< + void, + ArchTag, + TileShape, + ClusterShape + > { + using Scheduler = typename TileSchedulerSelector< + PersistentScheduler, + ArchTag, + TileShape, + ClusterShape + >::Scheduler; +}; + +template < + class TileShape, + class ClusterShape +> +struct TileSchedulerSelector< + StreamKScheduler, + arch::Sm90, + TileShape, + ClusterShape + > { + using Scheduler = PersistentTileSchedulerSm90StreamK; +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace cutlass::gemm::kernel::detail + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/tile_scheduler_params.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/tile_scheduler_params.h new file mode 100644 index 0000000000000000000000000000000000000000..eb98fd2f42f39056da5915e8069e30aaba32a704 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/tile_scheduler_params.h @@ -0,0 +1,1005 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +#pragma once + +/*! \file + \brief Parameters structures for persistent tile schedulers +*/ + +/* + Note: CUTLASS 3x increases the host compiler requirements to C++17. However, certain + existing integrations of CUTLASS require C++11 host compilers. + + Until this requirement can be lifted, certain headers with this annotation are required + to be remain consistent with C++11 syntax. + + C++11 compatibility is enforced by this unit test: `cutlass_test_unit_core_cpp11`. +*/ + +#include "cutlass/coord.h" +#include "cutlass/kernel_hardware_info.h" +#include "cutlass/workspace.h" +#include "cutlass/platform/platform.h" +#include "cutlass/fast_math.h" +#include "cutlass/gemm_coord.h" +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { +namespace detail { + +//////////////////////////////////////////////////////////////////////////////// + +// +// Parameters for SM90 tile schedulers +// + +// Parameters for SM90 persistent tile scheduler +struct PersistentTileSchedulerSm90Params { + + enum class RasterOrder { + AlongM, + AlongN + }; + + enum class RasterOrderOptions { + Heuristic, + AlongM, + AlongN + }; + + FastDivmodU64 divmod_cluster_shape_major_{}; + FastDivmodU64 divmod_cluster_shape_minor_{}; + FastDivmodU64 divmod_batch_{}; + FastDivmodU64 divmod_cluster_blk_major_{}; + + uint64_t blocks_per_problem_ = 0; + int32_t log_swizzle_size_ = 0; + RasterOrder raster_order_ = RasterOrder::AlongN; + + // Initializes members. This variant of the method should only be used when + // problem_shape and tile_shape contain modes of only rank 1. + void + initialize( + BatchedGemmCoord problem_shape, + GemmCoord tile_shape, + GemmCoord cluster_shape, + KernelHardwareInfo const& hw_info, + int max_swizzle_size, + RasterOrderOptions raster_order_option + ) { + dim3 problem_blocks = get_tiled_cta_shape_mnl(problem_shape, tile_shape, cluster_shape); + return initialize( + problem_blocks, + cluster_shape, + hw_info, + max_swizzle_size, + raster_order_option + ); + } + + // Version of initialize that takes in as input the number of CTAs in the M and N and L dimensions. + // This is useful for calculating the tiled shape when a mode of problem and/or CTA shape has rank > 1, + // for which using CuTe algebra for calculating tile shapes is easiest. 
+ void + initialize( + dim3 problem_blocks, + GemmCoord cluster_shape, + KernelHardwareInfo const& hw_info, + int max_swizzle_size, + RasterOrderOptions raster_order_option + ) { + + CUTLASS_UNUSED(hw_info); + + // Round up to nearest multiple of swizzle_size along each mode + auto log_swizzle_size = get_log_swizzle_size(problem_blocks.x, problem_blocks.y, max_swizzle_size); + auto problem_blocks_m = round_up(problem_blocks.x, (1 << log_swizzle_size) * cluster_shape.m()); + auto problem_blocks_n = round_up(problem_blocks.y, (1 << log_swizzle_size) * cluster_shape.n()); + + RasterOrder raster_order = get_rasterization_order( + problem_blocks_m, + problem_blocks_n, + raster_order_option + ); + + // + // Set members + // + + blocks_per_problem_ = problem_blocks_m * problem_blocks_n * problem_blocks.z; + log_swizzle_size_ = log_swizzle_size; + raster_order_ = raster_order; + divmod_batch_ = FastDivmodU64(problem_blocks_m * problem_blocks_n); + + if (raster_order == RasterOrder::AlongN) { + divmod_cluster_shape_major_ = FastDivmodU64(cluster_shape.n()); + divmod_cluster_shape_minor_ = FastDivmodU64(cluster_shape.m()); + divmod_cluster_blk_major_ = FastDivmodU64(problem_blocks_n / cluster_shape.n()); + } + else { + divmod_cluster_shape_major_ = FastDivmodU64(cluster_shape.m()); + divmod_cluster_shape_minor_ = FastDivmodU64(cluster_shape.n()); + divmod_cluster_blk_major_ = FastDivmodU64(problem_blocks_m / cluster_shape.m()); + } + } + + // Given the inputs, computes the physical grid we should launch. + // This variant of the method should only be used when + // problem_shape and tile_shape contain modes of only rank 1. + CUTLASS_HOST_DEVICE static + dim3 + get_grid_shape( + BatchedGemmCoord problem_shape, + GemmCoord cta_shape, + GemmCoord cluster_shape, + KernelHardwareInfo hw_info, + int max_swizzle_size, + RasterOrderOptions raster_order_option, + bool truncate_by_problem_size=true) { + + dim3 problem_blocks = get_tiled_cta_shape_mnl(problem_shape, cta_shape, cluster_shape); + return get_grid_shape( + problem_blocks, + cluster_shape, + hw_info, + max_swizzle_size, + raster_order_option, + truncate_by_problem_size + ); + } + + // Version of get_grid_shape that takes in as input the number of CTAs in the M and N and L dimensions. + // This is useful for calculating the tiled shape when a mode of problem and/or CTA shape has rank > 1, + // for which using CuTe algebra for calculating tile shapes is easiest. 
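A worked example of the rounding performed by the initialize() overload above; every value
below is illustrative only and assumes RasterOrderOptions::Heuristic:

  // problem_blocks = {33, 22, 1}, cluster_shape = {2, 1, 1}, max_swizzle_size = 4
  // get_log_swizzle_size(33, 22, 4): min(33, 22) = 22 >= 3, so log_swizzle_size = 2
  // problem_blocks_m = round_up(33, (1 << 2) * 2) = round_up(33, 8) = 40
  // problem_blocks_n = round_up(22, (1 << 2) * 1) = round_up(22, 4) = 24
  // Heuristic raster order: tiles_n (24) > tiles_m (40) is false, so RasterOrder::AlongN
  // blocks_per_problem_ = 40 * 24 * 1 = 960; divmod_cluster_blk_major_ = 24 / 1 = 24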
+ CUTLASS_HOST_DEVICE static + dim3 + get_grid_shape( + dim3 problem_blocks, + GemmCoord cluster_shape, + KernelHardwareInfo hw_info, + int max_swizzle_size, + RasterOrderOptions raster_order_option, + bool truncate_by_problem_size=true) { + + int const sm_count = hw_info.sm_count; + + // Round up to nearest multiple of swizzle_size along each mode + auto log_swizzle_size = get_log_swizzle_size(problem_blocks.x, problem_blocks.y, max_swizzle_size); + auto problem_blocks_m = round_up(problem_blocks.x, (1 << log_swizzle_size) * cluster_shape.m()); + auto problem_blocks_n = round_up(problem_blocks.y, (1 << log_swizzle_size) * cluster_shape.n()); + + int problem_blocks_total = problem_blocks_m * problem_blocks_n * problem_blocks.z; + + RasterOrder raster_order = get_rasterization_order( + problem_blocks_m, + problem_blocks_n, + raster_order_option + ); + + dim3 launch_grid; + + if (raster_order == RasterOrder::AlongN) { + launch_grid = dim3(cluster_shape.m(), 1, 1); + } + else { + launch_grid = dim3(1, cluster_shape.n(), 1); + } + + auto possibly_truncate = [&](int x, int y) { + if (truncate_by_problem_size) { + return cutlass::platform::min(x, y); + } + else { + return x; + } + }; + + // The else path is generic, however, we can avoid some divs if we know cluster size is 1 + auto cluster_size = cluster_shape.m() * cluster_shape.n(); + if (cluster_size == 1) { + if (raster_order == RasterOrder::AlongN) { + launch_grid.y = possibly_truncate(sm_count, problem_blocks_total); + } + else { + launch_grid.x = possibly_truncate(sm_count, problem_blocks_total); + } + } + else { + /* + * Optimal grid size calculation is based on + * GH100: 8 GPCs, 72 TPCs (9 TPCs/GPC), 2 SMs/TPC, 144 SMs per full GPU + * Hence, maximum SMs per GPC = 18 + */ + constexpr int max_sm_per_gpc = 18; + // Provided SM count could possibly be less than the assumed maximum SMs per GPC + auto cluster_size = cluster_shape.m() * cluster_shape.n(); + int const min_num_gpc = sm_count < max_sm_per_gpc ? 1 : sm_count / max_sm_per_gpc; + int const max_cta_occupancy_per_gpc = max_sm_per_gpc - (max_sm_per_gpc % cluster_size); + int cta_per_device = min_num_gpc * max_cta_occupancy_per_gpc; + + // The calculation below allows for larger grid size launch for different GPUs. + int const num_gpc_residual = sm_count < max_sm_per_gpc ? 0 : sm_count % max_sm_per_gpc; + int const max_cta_occupancy_per_residual_gpc = num_gpc_residual - (num_gpc_residual % cluster_size); + cta_per_device += max_cta_occupancy_per_residual_gpc; + + cta_per_device = sm_count < cta_per_device ? 
sm_count : cta_per_device; + + if (raster_order == RasterOrder::AlongN) { + launch_grid.y = possibly_truncate( + cta_per_device / cluster_shape.m(), + problem_blocks_total / cluster_shape.m()); + } + else { + launch_grid.x = possibly_truncate( + cta_per_device / cluster_shape.n(), + problem_blocks_total / cluster_shape.n()); + } + } + return launch_grid; + } + + CUTLASS_HOST_DEVICE + static int32_t + get_log_swizzle_size(int problem_ctas_m, int problem_ctas_n, int max_swizzle_size) { + int min_cta_dim = cutlass::platform::min(problem_ctas_m, problem_ctas_n); + if (max_swizzle_size >= 8 && min_cta_dim >= 6) { + return 3; + } + else if (max_swizzle_size >= 4 && min_cta_dim >= 3) { + return 2; + } + else if (max_swizzle_size >= 2 && min_cta_dim >= 2) { + return 1; + } + else { + return 0; + } + } + + CUTLASS_HOST_DEVICE + static RasterOrder + get_rasterization_order( + uint32_t tiles_m, + uint32_t tiles_n, + RasterOrderOptions raster_order_option + ) { + + if (raster_order_option == RasterOrderOptions::Heuristic) { + if (tiles_n > tiles_m) { + return RasterOrder::AlongM; + } + else { + return RasterOrder::AlongN; + } + } + else { + switch (raster_order_option) { + case RasterOrderOptions::AlongN: + return RasterOrder::AlongN; + break; + default: + return RasterOrder::AlongM; + } + } + } + + // Get the number of CTA tiles in this problem. This variant of the method should only be used when + // problem_shape and tile_shape contain modes of only rank 1. + CUTLASS_HOST_DEVICE + static dim3 + get_tiled_cta_shape_mnl(BatchedGemmCoord problem_shape, GemmCoord cta_shape, GemmCoord cluster_shape) { + auto cta_m = (problem_shape.m() + cta_shape.m() - 1) / cta_shape.m(); + auto cta_n = (problem_shape.n() + cta_shape.n() - 1) / cta_shape.n(); + + return get_tiled_cta_shape_mnl(problem_shape, cluster_shape, cta_m, cta_n); + } + + // Version of get_tiled_cta_shape_mnl that takes in as input the number of CTAs in the M and N dimensions. + // This is useful for calculating the tiled shape when a mode of problem and/or CTA shape has rank > 1, + // for which using CuTe algebra for calculating tile shapes is easiest. + CUTLASS_HOST_DEVICE + static dim3 + get_tiled_cta_shape_mnl(BatchedGemmCoord problem_shape, GemmCoord cluster_shape, uint32_t cta_m, uint32_t cta_n) { + + // Round up to nearest multiple of cluster dim along each mode + auto problem_blocks_m = ((cta_m + cluster_shape.m() - 1) / cluster_shape.m()) * cluster_shape.m(); + auto problem_blocks_n = ((cta_n + cluster_shape.n() - 1) / cluster_shape.n()) * cluster_shape.n(); + + return { + static_cast(problem_blocks_m), + static_cast(problem_blocks_n), + static_cast(problem_shape.batch()) + }; + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +// Parameters for SM90 persistent stream-K scheduler +struct PersistentTileSchedulerSm90StreamKParams { + + // Strategies for computing reductions between CTAs computing portions of a given output tile + enum class ReductionMode { + // Participating CTAs perform reduction in a turnstile fashion in order of the K extent + // covered by each CTA. This requires a lock to be held exclusively be the CTA that is + // currently accumulating. + // + // Turnstile accumulation ensures deterministic numeric behavior when using this mode. + Deterministic, + + // Participating CTAs perform reduction atomically to the same workspace (mostly) without locking. 
+ // Locks are used only to wait for the first CTA to write its partial values (to initialize the + // workspace), and for all but the final CTA to have accumulated (so that the final CTA can load + // the accumulated value and accumulate it into registers on top of which the epilogue will + // be performed). + // + // Due to the nondeterminsitic ordering of accumulation, deterministic numeric behavior cannot + // be guaranteed with this mode (e.g., floating-point rounding error will depend on the order + // of accumulation) + Nondeterministic + }; + + using UnderlyingParams = PersistentTileSchedulerSm90Params; + using RasterOrder = UnderlyingParams::RasterOrder; + using RasterOrderOptions = UnderlyingParams::RasterOrderOptions; + + FastDivmodU64 divmod_cluster_shape_major_{}; + FastDivmodU64 divmod_cluster_shape_minor_{}; + FastDivmodU64 divmod_batch_{}; + FastDivmodU64 divmod_k_{}; + FastDivmodU64 divmod_cluster_blk_major_{}; + + int32_t log_swizzle_size_ = 0; + + uint64_t units_per_problem_ = 0; + RasterOrder raster_order_ = RasterOrder::AlongN; + + // The splitting factor to be used in a split-K decomposition of the problem. + // If this is set to a value greater than 1, stream-K decomposition logic + // is bypassed in favor of a split-K decomposition. + uint32_t splits_ = 1; + + // Number of tiled k iterations required to compute a single output tile. + uint32_t k_tiles_per_output_tile_ = 0; + + // Number of stream-K or split-K work units that compute an extra k iteration. + // This is done to handle residuals in dividing up the k iteration space. + // For stream-K, since the actual assignment of work to stream-K units will be done + // at the granularity of a cluster, we store only the number of big clusters. + uint32_t big_units_ = 0; + + // Workspace for holding partial accumulators to be reduced across stream-K/split-K units + void* reduction_workspace_ = nullptr; + + // Number of tiles covered by stream-K work units + uint32_t sk_tiles_ = 0; + + // Number of work units computing stream-K tiles + uint32_t sk_units_ = 0; + + // Number of tiled k iterations computed by each stream-K work unit. This + // can potentially cover more than one output tile. + uint32_t k_tiles_per_sk_unit_ = 0; + + // Strategy to use when reducing between collaborating CTAs + ReductionMode reduction_mode_ = ReductionMode::Deterministic; + + // Minimum number of tiled k that can be assigned to a stream-K unit + static constexpr uint32_t min_iters_per_sk_unit_ = 4u; + + // Initializes members. This variant of the method should only be used when + // problem_shape and tile_shape contain modes of only rank 1. + void + initialize( + BatchedGemmCoord problem_shape, + GemmCoord tile_shape, + GemmCoord cluster_shape, + KernelHardwareInfo hw_info, + int splits, + int max_swizzle, + RasterOrderOptions raster_order_option, + ReductionMode reduction_mode, + void* workspace + ) { + dim3 problem_blocks = UnderlyingParams::get_tiled_cta_shape_mnl( + problem_shape, tile_shape, cluster_shape); + + // Number of k tiles in each output tile + uint32_t k_tiles_per_output_tile = (problem_shape.k() + tile_shape.k() - 1) / tile_shape.k(); + + initialize( + problem_blocks, + k_tiles_per_output_tile, + cluster_shape, + hw_info, + splits, + max_swizzle, + raster_order_option, + reduction_mode, + workspace + ); + } + + // Version of initialize that takes in as input the number of CTAs in the M and N and L dimensions. 
+ // This is useful for calculating the tiled shape when a mode of problem and/or CTA shape has rank > 1, + // for which using CuTe algebra for calculating tile shapes is easiest. + void + initialize( + dim3 problem_blocks, + uint32_t k_tiles_per_output_tile, + GemmCoord cluster_shape, + KernelHardwareInfo hw_info, + int splits, + int max_swizzle, + RasterOrderOptions raster_order_option, + ReductionMode reduction_mode, + void* workspace + ) { + UnderlyingParams underlying_params; + underlying_params.initialize( + problem_blocks, + cluster_shape, + hw_info, + max_swizzle, + raster_order_option + ); + + auto problem_blocks_m = problem_blocks.x; + auto problem_blocks_n = problem_blocks.y; + auto problem_blocks_l = problem_blocks.z; + + uint64_t output_tiles = problem_blocks_m * problem_blocks_n * problem_blocks_l; + + // Reduction workspace is at the beginning of the workspace. Lock workspace follows. + void* reduction_workspace = workspace; + + if (splits > 1) { + // Short circuit to basic split-K decomposition + + // Don't split by more than the available number of SMs + if (splits > hw_info.sm_count) { + splits = hw_info.sm_count; + } + + // Don't split by more than the K tile iterations + // + // splits is almost certainly nonnegative here (e.g., hw_info.sm_count, + // despite being an int, is a count), so it can safely be converted to unsigned + // in the comparison to avoid a signed-unsigned comparison warning-as-error. + if (static_cast(splits) > k_tiles_per_output_tile) { + splits = k_tiles_per_output_tile; + } + + set_params_basic( + underlying_params, + problem_blocks_m, + problem_blocks_n, + problem_blocks_l, + splits, + k_tiles_per_output_tile, + reduction_workspace, + reduction_mode + ); + return; + } + + // Calculate the maximum number of blocks from clusters of shape cluster_shape that we + // can fit within sm_count SMs. + dim3 grid = get_grid_shape( + problem_blocks, + cluster_shape, + hw_info, + max_swizzle, + raster_order_option + ); + + uint64_t ctas_per_wave = grid.x * grid.y; + + // The number of output tiles to be computed in stream-K and data-parallel fashion, respectively. + uint32_t sk_tiles = get_num_sk_tiles(output_tiles, ctas_per_wave, k_tiles_per_output_tile); + uint64_t dp_tiles = output_tiles - sk_tiles; + + if (sk_tiles == 0) { + // Short circuit to basic data-parallel decomposition + set_params_basic( + underlying_params, + problem_blocks_m, + problem_blocks_n, + problem_blocks_l, + /* splits = */ 1, + k_tiles_per_output_tile, + reduction_workspace, + reduction_mode + ); + return; + } + + // Calculate the number of work units covering the data-parallel and stream-K tiles. + // A "work unit" is a single index in the linearized ID space used by the scheduler. + // We distinguish it from a "block," which is typically tied to a hardware unit + // (e.g., the callers into this scheduler will be persistent thread blocks). + // A work unit can encompass multiple output tiles worth of work (as will be the + // case for stream-K blocks). + // Since splitting is not required for data-parallel tiles, only one data-parallel unit + // is needed per data-parallel tile. + uint64_t dp_units = dp_tiles; + + // Number of k iterations computed by the stream-K units as a whole + uint64_t k_tiles_sk_total = k_tiles_per_output_tile * sk_tiles; + + // If there are stream-K tiles to compute and a sufficiently large number of k iterations + // across them, they will be covered by a single wave of persistent threadblocks. 
Thus, there + // will be as many work units as there are threadblocks in a single wave. + // + // When the total k iterations across stream-K tiles is too small to justify distributing + // across an entire wave of blocks, we instead distribute the iterations over a smaller + // set of blocks. + + // Calculate the number of stream-K units that would be needed if each stream-K unit + // computed the minimum allowable k iterations. Truncate this to be in units of clusters. + auto cluster_size = cluster_shape.m() * cluster_shape.n(); + uint64_t min_sized_sk_units = (k_tiles_sk_total / min_iters_per_sk_unit_); + min_sized_sk_units = (min_sized_sk_units / cluster_size) * cluster_size; + + uint64_t sk_units = cutlass::platform::min(ctas_per_wave, min_sized_sk_units); + + // If the number of stream-K units is a multiple of the number of stream-K tiles, then + // the problem can leverage a basic split-K decomposition for the stream-K tiles. + if (sk_tiles < sk_units && sk_units % sk_tiles == 0) { + // Short circuit to basic split-K decomposition + uint32_t sk_splits = static_cast(sk_units / sk_tiles); + set_params_basic( + underlying_params, + problem_blocks_m, + problem_blocks_n, + problem_blocks_l, + sk_splits, + k_tiles_per_output_tile, + reduction_workspace, + reduction_mode + ); + return; + } + + // Number of k iterations computed per stream-K units + uint64_t k_tiles_per_sk_unit = k_tiles_sk_total / sk_units; + + // Number of stream-K units that need to compute extra iterations in order to cover + // the residual k iterations. This assumes that each such unit computes one additional + // iteration. + uint64_t sk_big_units = k_tiles_sk_total - (k_tiles_per_sk_unit * sk_units); + + // The division below is guaranteed to be exact because sk_big_units is guaranteed + // to be a multiple of cluster_size. This is useful because + // it allows us to use a block's linearized cluster ID to determine whether it is + // a big block. The reasoning behind this guarnatee is explained as follows: + // sk_big_units = k_tiles_sk_total - (k_tiles_per_sk_unit * sk_units); + // + // - k_tiles_sk_total is a multiple of cluster_size because it is the product + // of number of tail tiles and the number of k iterations per tile. Because + // both the number of output tiles and number of available SMs are rounded + // to be multiples of cluster shape, the number of tail tiles + // (output_tiles % avail_sms) is a multpile of cluster_size. + // + // - sk_units is a multiple of cluster_size because it is either blocks_per_wave + // or 0, and blocks_per_wave is a multiple of the cluster_size due to the grid-planning + // logic rounding to multiples of cluster dimensions + uint64_t sk_big_units_per_cluster = sk_big_units / cluster_size; + + divmod_cluster_shape_major_ = underlying_params.divmod_cluster_shape_major_; + divmod_cluster_shape_minor_ = underlying_params.divmod_cluster_shape_minor_; + divmod_batch_ = underlying_params.divmod_batch_; + divmod_k_ = FastDivmodU64(problem_blocks_m * problem_blocks_n); // Static k-splitting divmod. Unused for stream-K. + divmod_cluster_blk_major_ = underlying_params.divmod_cluster_blk_major_; + log_swizzle_size_ = underlying_params.log_swizzle_size_; + units_per_problem_ = static_cast(dp_units + sk_units); + raster_order_ = underlying_params.raster_order_; + splits_ = 1; // Static k-splitting factor. Unused for stream-K. 
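    // An illustrative trace of the stream-K sizing above (all numbers are hypothetical):
    //   sk_tiles = 6, k_tiles_per_output_tile = 10, cluster_size = 2, ctas_per_wave = 16
    //   k_tiles_sk_total     = 6 * 10 = 60
    //   min_sized_sk_units   = 60 / min_iters_per_sk_unit_ (4) = 15, truncated to 14 (a cluster multiple)
    //   sk_units             = min(16, 14) = 14; 14 % 6 != 0, so the split-K shortcut is not taken
    //   k_tiles_per_sk_unit_ = 60 / 14 = 4
    //   sk_big_units         = 60 - 4 * 14 = 4, giving big_units_ = 4 / 2 = 2 big clusters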
+ k_tiles_per_output_tile_ = k_tiles_per_output_tile; + big_units_ = static_cast(sk_big_units_per_cluster); + reduction_workspace_ = reduction_workspace; + sk_tiles_ = sk_tiles; + sk_units_ = static_cast(sk_units); + k_tiles_per_sk_unit_ = static_cast(k_tiles_per_sk_unit); + reduction_mode_ = reduction_mode; + } + + // Given the inputs, computes the physical grid we should launch. + // This variant of the method should only be used when + // problem_shape and tile_shape contain modes of only rank 1. + CUTLASS_HOST_DEVICE + static dim3 + get_grid_shape( + BatchedGemmCoord problem_shape, + GemmCoord cta_shape, + GemmCoord cluster_shape, + KernelHardwareInfo hw_info, + int max_swizzle_size, + RasterOrderOptions raster_order_option + ) { + + dim3 problem_blocks = UnderlyingParams::get_tiled_cta_shape_mnl(problem_shape, cta_shape, cluster_shape); + + return get_grid_shape( + problem_blocks, + cluster_shape, + hw_info, + max_swizzle_size, + raster_order_option + ); + } + + // Version of get_grid_shape that takes in as input the number of CTAs in the M and N and L dimensions. + // This is useful for calculating the tiled shape when a mode of problem and/or CTA shape has rank > 1, + // for which using CuTe algebra for calculating tile shapes is easiest. + CUTLASS_HOST_DEVICE + static dim3 + get_grid_shape( + dim3 problem_blocks, + GemmCoord cluster_shape, + KernelHardwareInfo hw_info, + int max_swizzle_size, + RasterOrderOptions raster_order_option + ) { + + // Call into the underlying get_grid_shape method, but do not allow the grid shape returned + // to be truncated based on the number of output tiles in the problem. + return UnderlyingParams::get_grid_shape( + problem_blocks, + cluster_shape, + hw_info, + max_swizzle_size, + raster_order_option, + /* truncate_by_problem_size = */false + ); + } + + // Returns the number of stream-K tiles that will be computed amongst `output_tiles` total + // output tiles on a device with `ctas_per_wave` CTAs in each wave. + static uint32_t + get_num_sk_tiles(uint64_t output_tiles, uint64_t ctas_per_wave, uint32_t k_tiles_per_output_tile) { + uint32_t full_waves = static_cast(output_tiles / ctas_per_wave); + uint32_t total_waves = static_cast((output_tiles + ctas_per_wave - 1) / ctas_per_wave); + + if (full_waves == total_waves || k_tiles_per_output_tile <= min_iters_per_sk_unit_) { + // All tiles will be data-parallel tiles if there is either no quantization + // or if there is no work to be split. + return 0; + } + + // + // The final wave is not full. Perform some stream-K work. + // + + // Rudimentary heuristic: prefer data-parallel decomposition if we have more than + // one wave and the tail wave is more than half full. This is subject to change. + if (full_waves != 0) { + uint64_t tail_tiles = output_tiles - (full_waves * ctas_per_wave); + if (tail_tiles >= (ctas_per_wave / 2)) { + return 0; + } + } + + // If there is wave quantization, assign the first two waves worth of tiles to be + // covered by stream-K work and the remainder to be data-parallel. Since we know + // that full_waves == total_waves - 1 in this case, the number of data-parallel + // waves is simply full_waves-1 (unless full_waves == 0). + uint32_t dp_waves = full_waves > 0 ? 
full_waves - 1 : 0; + + uint64_t dp_tiles = dp_waves * ctas_per_wave; + return static_cast(output_tiles - dp_tiles); + } + + // Calculates the size of the workspace needed for holding reduction barriers + CUTLASS_HOST_DEVICE + static int + get_barrier_workspace_size(uint64_t num_tiles, uint32_t mma_warp_groups, uint32_t barrier_bits) { + auto workspace_bits = num_tiles * mma_warp_groups * barrier_bits; + return round_up_to_l2_alignment(bits_to_bytes(static_cast(workspace_bits))); + } + + // Calculates the size of the workspace needed for holding partial outputs from splits + CUTLASS_HOST_DEVICE + static int + get_reduction_workspace_size(uint64_t num_tiles, GemmCoord tile_shape, uint32_t accumulator_bits) { + auto output_tile_size = tile_shape.m() * tile_shape.n(); + auto workspace_bits = accumulator_bits * output_tile_size * num_tiles; + return round_up_to_l2_alignment(bits_to_bytes(static_cast(workspace_bits))); + } + + #if !defined(__CUDACC_RTC__) + static void + get_workspace_component_sizes( + dim3 problem_blocks, + uint32_t k_tiles_per_output_tile, + GemmCoord tile_shape, + GemmCoord cluster_shape, + int& barrier_workspace_size, + int& reduction_workspace_size, + KernelHardwareInfo const& hw_info, + int splits, + int max_swizzle, + RasterOrderOptions raster_order_option, + uint32_t mma_warp_groups, + uint32_t barrier_bits, + uint32_t accumulator_bits) { + + // Workspace is needed only for output tiles that will be split. Thus, we first determine the number + // of output tiles that will be split, and then calculate the workspace needed to cover these. + uint64_t output_tiles = problem_blocks.x * problem_blocks.y * problem_blocks.z; + + if (splits > 1) { + // Basic split-K variant requires workspace for all output tiles + barrier_workspace_size = get_barrier_workspace_size(output_tiles, mma_warp_groups, barrier_bits); + reduction_workspace_size = get_reduction_workspace_size(output_tiles, tile_shape, accumulator_bits); + } + else { + KernelHardwareInfo new_hw_info; + new_hw_info.device_id = hw_info.device_id; + new_hw_info.sm_count = hw_info.sm_count; + if (new_hw_info.sm_count <= 0) { + CUTLASS_TRACE_HOST(" WARNING: Arguments do not include a valid SM count.\n" + " For optimal performance, populate the arguments KernelHardwareInfo struct with the SM count."); + new_hw_info.sm_count = KernelHardwareInfo::query_device_multiprocessor_count(new_hw_info.device_id); + } + + dim3 grid = get_grid_shape( + problem_blocks, + cluster_shape, + new_hw_info, + max_swizzle, + raster_order_option + ); + uint64_t ctas_per_wave = grid.x * grid.y; + uint32_t sk_tiles = get_num_sk_tiles(output_tiles, ctas_per_wave, static_cast(k_tiles_per_output_tile)); + + barrier_workspace_size = get_barrier_workspace_size(sk_tiles, mma_warp_groups, barrier_bits); + reduction_workspace_size = get_reduction_workspace_size(sk_tiles, tile_shape, accumulator_bits); + } + } + #endif // !defined(__CUDACC_RTC__) + + // Get the amount of scratch workspace needed for the kernel. This variant of the method should only be used when + // problem_shape and tile_shape contain modes of only rank 1. 
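Illustrative arithmetic for the two sizing helpers above; the tile shape, bit widths, and tile
count below are hypothetical rather than defaults of this header:

  // sk_tiles = 6, tile_shape = 128x128, accumulator_bits = 32, mma_warp_groups = 2, barrier_bits = 32
  // reduction workspace: 32 * (128 * 128) * 6 bits = 3,145,728 bits = 393,216 bytes (already 128B-aligned)
  // barrier workspace:   6 * 2 * 32 bits = 384 bits = 48 bytes, rounded up to 128 bytes for L2 alignment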
+ static int + get_workspace_size( + BatchedGemmCoord problem_shape, + GemmCoord tile_shape, + GemmCoord cluster_shape, + KernelHardwareInfo const& hw_info, + int splits, + int max_swizzle, + RasterOrderOptions raster_order_option, + uint32_t mma_warp_groups, + uint32_t barrier_bits, + uint32_t element_accumulator_bits) { + + dim3 problem_blocks = UnderlyingParams::get_tiled_cta_shape_mnl(problem_shape, tile_shape, cluster_shape); + uint32_t k_tiles_per_output_tile = (problem_shape.k() + tile_shape.k() - 1) / tile_shape.k(); + + return get_workspace_size( + problem_blocks, + k_tiles_per_output_tile, + tile_shape, + cluster_shape, + hw_info, + splits, + max_swizzle, + raster_order_option, + mma_warp_groups, + barrier_bits, + element_accumulator_bits + ); + } + + // Version of get_workspace_size that takes in as input the number of CTAs in the M and N dimensions. + // This is useful for calculating the tiled shape when a mode of problem and/or CTA shape has rank > 1, + // for which using CuTe algebra for calculating tile shapes is easiest. + static int + get_workspace_size( + dim3 problem_blocks, + uint32_t k_tiles_per_output_tile, + GemmCoord tile_shape, + GemmCoord cluster_shape, + KernelHardwareInfo const& hw_info, + int splits, + int max_swizzle, + RasterOrderOptions raster_order_option, + uint32_t mma_warp_groups, + uint32_t barrier_bits, + uint32_t element_accumulator_bits) { + + int barrier_workspace_size = 0; + int reduction_workspace_size = 0; + + #if !defined(__CUDACC_RTC__) + get_workspace_component_sizes( + problem_blocks, + k_tiles_per_output_tile, + tile_shape, + cluster_shape, + barrier_workspace_size, + reduction_workspace_size, + hw_info, + splits, + max_swizzle, + raster_order_option, + mma_warp_groups, + barrier_bits, + element_accumulator_bits + ); + #endif + + return barrier_workspace_size + reduction_workspace_size; + } + + // Initialize the workspace to be used for the kernel. This variant of the method should only be used when + // problem_shape and tile_shape contain modes of only rank 1. + static cutlass::Status + initialize_workspace( + void* workspace, + cudaStream_t stream, + BatchedGemmCoord problem_shape, + GemmCoord tile_shape, + GemmCoord cluster_shape, + KernelHardwareInfo const& hw_info, + int splits, + int max_swizzle, + RasterOrderOptions raster_order_option, + uint32_t mma_warp_groups, + uint32_t barrier_bits, + uint32_t element_accumulator_bits) { + + dim3 problem_blocks = UnderlyingParams::get_tiled_cta_shape_mnl(problem_shape, tile_shape, cluster_shape); + uint32_t k_tiles_per_output_tile = (problem_shape.k() + tile_shape.k() - 1) / tile_shape.k(); + + return initialize_workspace( + workspace, + stream, + problem_blocks, + k_tiles_per_output_tile, + tile_shape, + cluster_shape, + hw_info, + splits, + max_swizzle, + raster_order_option, + mma_warp_groups, + barrier_bits, + element_accumulator_bits + ); + } + + // Version of initialize_workspace that takes in as input the number of CTAs in the M and N dimensions. + // This is useful for calculating the tiled shape when a mode of problem and/or CTA shape has rank > 1, + // for which using CuTe algebra for calculating tile shapes is easiest. 
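A minimal host-side sketch of how the rank-1 get_workspace_size() / initialize_workspace()
entry points above could be driven; the shapes, bit widths, and tuning values are assumptions
for illustration, not values prescribed by this header:

  // (requires the CUTLASS headers included by this file plus <cuda_runtime.h>)
  using Params = cutlass::gemm::kernel::detail::PersistentTileSchedulerSm90StreamKParams;

  cutlass::gemm::BatchedGemmCoord problem(1024, 1024, 512, 1);    // hypothetical GEMM problem
  cutlass::gemm::GemmCoord tile(128, 128, 32), cluster(2, 1, 1);  // hypothetical CTA/cluster shapes

  cutlass::KernelHardwareInfo hw_info;
  hw_info.device_id = 0;
  hw_info.sm_count  = cutlass::KernelHardwareInfo::query_device_multiprocessor_count(0);

  // Query the scratch footprint, allocate it, and let the scheduler zero its barrier region.
  int workspace_bytes = Params::get_workspace_size(
      problem, tile, cluster, hw_info, /*splits=*/1, /*max_swizzle=*/1,
      Params::RasterOrderOptions::Heuristic,
      /*mma_warp_groups=*/2, /*barrier_bits=*/32, /*element_accumulator_bits=*/32);

  void* workspace = nullptr;
  cudaMalloc(&workspace, workspace_bytes);
  Params::initialize_workspace(
      workspace, /*stream=*/nullptr, problem, tile, cluster, hw_info, /*splits=*/1,
      /*max_swizzle=*/1, Params::RasterOrderOptions::Heuristic,
      /*mma_warp_groups=*/2, /*barrier_bits=*/32, /*element_accumulator_bits=*/32);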
+ static cutlass::Status + initialize_workspace( + void* workspace, + cudaStream_t stream, + dim3 problem_blocks, + uint32_t k_tiles_per_output_tile, + GemmCoord tile_shape, + GemmCoord cluster_shape, + KernelHardwareInfo const& hw_info, + int splits, + int max_swizzle, + RasterOrderOptions raster_order_option, + uint32_t mma_warp_groups, + uint32_t barrier_bits, + uint32_t element_accumulator_bits) { + + #if !defined(__CUDACC_RTC__) + int barrier_workspace_size = 0; + int reduction_workspace_size = 0; + + get_workspace_component_sizes( + problem_blocks, + k_tiles_per_output_tile, + tile_shape, + cluster_shape, + barrier_workspace_size, + reduction_workspace_size, + hw_info, + splits, + max_swizzle, + raster_order_option, + mma_warp_groups, + barrier_bits, + element_accumulator_bits + ); + + if (barrier_workspace_size > 0) { + if (workspace == nullptr) { + return Status::kErrorWorkspaceNull; + } + + // Only the barrier workspace needs to be cleared for stream-K. + // Barrier workspace follows reduction workspace. + uint8_t* barrier_workspace = reinterpret_cast(workspace) + reduction_workspace_size; + return zero_workspace(static_cast(barrier_workspace), barrier_workspace_size, stream); + } + #endif // !defined(__CUDACC_RTC__) + + return Status::kSuccess; + } + + void + set_params_basic( + UnderlyingParams const& underlying_params, + uint32_t blocks_m, + uint32_t blocks_n, + uint32_t blocks_l, + uint32_t splits, + uint32_t k_tiles_per_output_tile, + void* reduction_workspace, + ReductionMode reduction_mode) { + + divmod_cluster_shape_major_ = underlying_params.divmod_cluster_shape_major_, + divmod_cluster_shape_minor_ = underlying_params.divmod_cluster_shape_minor_, + divmod_batch_ = FastDivmodU64(blocks_m * blocks_n * splits), + divmod_k_ = FastDivmodU64(blocks_m * blocks_n), + divmod_cluster_blk_major_ = underlying_params.divmod_cluster_blk_major_, + log_swizzle_size_ = underlying_params.log_swizzle_size_, + units_per_problem_ = blocks_m * blocks_n * blocks_l * splits, + raster_order_ = underlying_params.raster_order_, + splits_ = splits, + k_tiles_per_output_tile_ = k_tiles_per_output_tile, + big_units_ = k_tiles_per_output_tile % splits, + reduction_workspace_ = reduction_workspace; + reduction_mode_ = reduction_mode; + + // No stream-K work is performed for "basic" data-parallel and split-K decompositions + sk_tiles_ = 0; + sk_units_ = 0; + k_tiles_per_sk_unit_ = 0; + } + +private: + // Round up number of bytes to the nearest multiple of L2 cache line alignment + CUTLASS_HOST_DEVICE + static int + round_up_to_l2_alignment(int bytes) { + constexpr static uint32_t L2CacheLineSizeBytes = 128; + return (bytes + L2CacheLineSizeBytes - 1) / L2CacheLineSizeBytes * L2CacheLineSizeBytes; + } +}; + +//////////////////////////////////////////////////////////////////////////////// +} // namespace detail +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/trmm_universal.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/trmm_universal.h new file mode 100644 index 0000000000000000000000000000000000000000..bca9450b8e37607c80bc65e491b267fb1aef4055 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/kernel/trmm_universal.h @@ -0,0 +1,599 @@ 
+/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief + +*/ + +#pragma once + +#include "cutlass/blas3.h" +#include "cutlass/fast_math.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/complex.h" +#include "cutlass/semaphore.h" +#include "cutlass/core_io.h" +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate + typename Epilogue_, ///! Epilogue + typename ThreadblockSwizzle_, ///! Threadblock swizzling function + SideMode SideMode_, ///! Side Mode for the kernel (kLeft or kRight) + FillMode FillMode_, ///! Fill Mode for triangular matrix (kLower or kUpper) + DiagType DiagType_ ///! 
Diag Type for triangular matrix (kNonUnit or kUnit) +> +struct TrmmUniversal { +public: + + using Mma = Mma_; + using Epilogue = Epilogue_; + using EpilogueOutputOp = typename Epilogue::OutputOp; + using ThreadblockSwizzle = ThreadblockSwizzle_; + + using ElementA = typename Mma::IteratorA::Element; + using LayoutA = typename Mma::IteratorA::Layout; + using ElementB = typename Mma::IteratorB::Element; + using LayoutB = typename Mma::IteratorB::Layout; + using ElementC = typename Epilogue::OutputTileIterator::Element; + using LayoutC = typename Epilogue::OutputTileIterator::Layout; + static SideMode const kSideMode = SideMode_; + static FillMode const kFillMode = FillMode_; + static DiagType const kDiagType = DiagType_; + + static ComplexTransform const kTransformA = Mma::kTransformA; + static ComplexTransform const kTransformB = Mma::kTransformB; + using Operator = typename Mma::Operator; + + using OperatorClass = typename Mma::Operator::OperatorClass; + using ThreadblockShape = typename Mma::Shape; + using WarpShape = typename Mma::Operator::Shape; + using InstructionShape = typename Mma::Policy::Operator::InstructionShape; + using ArchTag = typename Mma::ArchTag; + + static int const kStages = Mma::kStages; + static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + + /// Warp count (concept: GemmShape) + using WarpCount = typename Mma::WarpCount; + static int const kThreadCount = 32 * WarpCount::kCount; + + /// Split-K preserves splits that are 128b aligned + static int const kSplitKAlignment = const_max(128 / sizeof_bits::value, 128 / sizeof_bits::value); + + // + // Structures + // + + /// Argument structure + struct Arguments { + + // + // Data members + // + + GemmUniversalMode mode; + GemmCoord problem_size; + int batch_count; + + typename EpilogueOutputOp::Params epilogue; + + void const * ptr_A; + void const * ptr_B; + void * ptr_D; + + int64_t batch_stride_A; + int64_t batch_stride_B; + int64_t batch_stride_D; + + typename LayoutA::Stride::Index lda; + typename LayoutB::Stride::Index ldb; + typename LayoutC::Stride::Index ldd; + + // + // Methods + // + + Arguments(): + mode(GemmUniversalMode::kGemm), + batch_count(1), + ptr_A(nullptr), ptr_B(nullptr), ptr_D(nullptr) { } + + /// constructs an arguments structure + Arguments( + GemmUniversalMode mode, + GemmCoord problem_size, + int batch_count, + typename EpilogueOutputOp::Params epilogue, + void const * ptr_A, + void const * ptr_B, + void * ptr_D, + int64_t batch_stride_A, + int64_t batch_stride_B, + int64_t batch_stride_D, + typename LayoutA::Stride::Index lda, + typename LayoutB::Stride::Index ldb, + typename LayoutC::Stride::Index ldd + ): + mode(mode), + problem_size(problem_size), + batch_count(batch_count), + epilogue(epilogue), + ptr_A(ptr_A), ptr_B(ptr_B), ptr_D(ptr_D), + batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_D(batch_stride_D), + lda(lda), ldb(ldb), ldd(ldd) { + } + + /// Returns arguments for the transposed problem sizes + Arguments transposed_problem_size() const { + Arguments args(*this); + + std::swap(args.problem_size.m(), args.problem_size.n()); + + return args; + } + + /// Returns arguments for the transposed matrices + Arguments swapped_matrices() const { + Arguments args(*this); + + std::swap(args.ptr_A, args.ptr_B); + std::swap(args.lda, args.ldb); + std::swap(args.batch_stride_A, args.batch_stride_B); + + 
return args; + } + }; + + // + // Structure for precomputing values in host memory and passing to kernels + // + + /// Parameters structure + struct Params { + + cutlass::gemm::GemmCoord problem_size; + cutlass::gemm::GemmCoord grid_tiled_shape; + int swizzle_log_tile; + + typename Mma::IteratorA::Params params_A; + typename Mma::IteratorB::Params params_B; + typename Epilogue::OutputTileIterator::Params params_D; + + typename EpilogueOutputOp::Params output_op; + + GemmUniversalMode mode; + int batch_count; + int gemm_k_size; + + void * ptr_A; + void * ptr_B; + void * ptr_D; + + int64_t batch_stride_A; + int64_t batch_stride_B; + int64_t batch_stride_D; + + int *semaphore; + + // + // Methods + // + + CUTLASS_HOST_DEVICE + Params(): + swizzle_log_tile(0), + params_A(0), + params_B(0), + params_D(0), + batch_count(0), + gemm_k_size(0), + mode(cutlass::gemm::GemmUniversalMode::kGemm), + ptr_A(nullptr), + ptr_B(nullptr), + ptr_D(nullptr), + batch_stride_A(0), + batch_stride_B(0), + batch_stride_D(0), + semaphore(nullptr) { } + + CUTLASS_HOST_DEVICE + Params( + Arguments const &args, + cutlass::gemm::GemmCoord const & grid_tiled_shape, + int gemm_k_size, + void *workspace = nullptr + ): + problem_size(args.problem_size), + grid_tiled_shape(grid_tiled_shape), + swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)), + params_A(args.lda), + params_B(args.ldb), + params_D(args.ldd), + output_op(args.epilogue), + mode(args.mode), + batch_count(args.batch_count), + gemm_k_size(gemm_k_size), + ptr_A(const_cast(args.ptr_A)), + ptr_B(const_cast(args.ptr_B)), + ptr_D(args.ptr_D), + batch_stride_A(args.batch_stride_A), + batch_stride_B(args.batch_stride_B), + batch_stride_D(args.batch_stride_D), + semaphore(static_cast(workspace)) { + } + + CUTLASS_HOST_DEVICE + void update( + Arguments const &args, + void *workspace = nullptr) { + + ptr_A = const_cast(args.ptr_A); + ptr_B = const_cast(args.ptr_B); + ptr_D = args.ptr_D; + + batch_stride_A = args.batch_stride_A; + batch_stride_B = args.batch_stride_B; + batch_stride_D = args.batch_stride_D; + + output_op = args.epilogue; + + semaphore = static_cast(workspace); + } + + }; + + /// Shared memory storage structure + union SharedStorage { + typename Mma::SharedStorage main_loop; + typename Epilogue::SharedStorage epilogue; + }; + +public: + + // + // Methods + // + + CUTLASS_DEVICE + TrmmUniversal() { } + + /// Determines whether kernel satisfies alignment + static Status can_implement( + cutlass::gemm::GemmCoord const & problem_size) { + + static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + + if ((problem_size.m() % kAlignmentA) || (problem_size.k() % kAlignmentA) || + (problem_size.n() % kAlignmentB) || (problem_size.k() % kAlignmentB) || + (problem_size.m() % kAlignmentC) || (problem_size.n() % kAlignmentC)) { + + return Status::kErrorMisalignedOperand; + } + + return Status::kSuccess; + } + + static Status can_implement(Arguments const &args) { + return can_implement(args.problem_size); + } + + /// Executes one GEMM + CUTLASS_DEVICE + void operator()(Params const ¶ms, SharedStorage &shared_storage) { + + // Compute threadblock location + ThreadblockSwizzle threadblock_swizzle; + + cutlass::gemm::GemmCoord threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + // Early exit if CTA is out of range + if (params.grid_tiled_shape.m() <= 
threadblock_tile_offset.m() || + params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { + + return; + } + + int offset_k = 0; + int problem_size_k = params.problem_size.k(); + + ElementA *ptr_A = static_cast(params.ptr_A); + ElementB *ptr_B = static_cast(params.ptr_B); + + // + // Fetch pointers based on mode. + // + if (params.mode == GemmUniversalMode::kGemm || + params.mode == GemmUniversalMode::kGemmSplitKParallel) { + + if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) { + + problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size; + } + + offset_k = threadblock_tile_offset.k() * params.gemm_k_size; + } + else if (params.mode == GemmUniversalMode::kBatched) { + ptr_A += threadblock_tile_offset.k() * params.batch_stride_A; + ptr_B += threadblock_tile_offset.k() * params.batch_stride_B; + } + else if (params.mode == GemmUniversalMode::kArray) { + ptr_A = static_cast(params.ptr_A)[threadblock_tile_offset.k()]; + ptr_B = static_cast(params.ptr_B)[threadblock_tile_offset.k()]; + } + + __syncthreads(); + + // Compute initial location in logical coordinates + cutlass::MatrixCoord tb_offset_A{ + threadblock_tile_offset.m() * Mma::Shape::kM, + offset_k, + }; + + cutlass::MatrixCoord tb_offset_B{ + offset_k, + threadblock_tile_offset.n() * Mma::Shape::kN + }; + + // Compute position within threadblock + int thread_idx = threadIdx.x; + + // Broadcast the warp_id computed by lane 0 to ensure dependent code + // is compiled as warp-uniform. + int warp_idx = canonical_warp_idx_sync(); + + int lane_idx = threadIdx.x % 32; + + // + // Main loop + // + + // Construct thread-scoped matrix multiply + Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); + + typename Mma::FragmentC accumulators; + + accumulators.clear(); + + // Compute threadblock-scoped matrix multiply-add + int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK; + + /****************************************************************************************************** + First two cases: (Left Side, Lower Fill) and (Right Side, Upper Fill) are transpose of each other + - (Left Side, Lower Fill): calculate bottom of the CTA tile, then find the k-iterations + needed to process all elements till that coordinate. + - (Right Side, Upper Fill): calculate right end of the CTA tile, then find the k-iterations + needed to process all elements till that coordinate. + + Last two cases: (Left Side, Upper Fill) and (Right Side, Lower Fill) are transpose of each other + - (Left Side, Upper Fill): calculate the top of the CTA tile, then find k-iterations + that can be skipped for all elements of this tile. + - (Right Side, Lower Fill): calculate the left start of the CTA tile, then find k-iterations + that can be skipped for all elements of this tile. 
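      Worked example (editor's addition, not in the original header): take Mma::Shape::kM == 128,
      Mma::Shape::kK == 32, and the CTA at threadblock_tile_offset.m() == 1, which covers rows
      128..255 of the output.
        - (Left Side, Lower Fill): k_iterations_till_diagonal = ((1 + 1) * 128 + 31) / 32 = 8, so
          gemm_k_iterations is capped at 8; k-tiles covering k >= 256 are skipped because those
          columns of the lower-triangular operand are zero for every row this CTA touches.
        - (Left Side, Upper Fill): k_iterations_till_diagonal = (1 * 128) / 32 = 4, so the A and B
          offsets advance by 4 k-tiles and gemm_k_iterations shrinks by 4; k-tiles covering k < 128
          are skipped because those columns of the upper-triangular operand are zero for all rows >= 128.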
+ ********************************************************************************************************/ + + if (kSideMode == SideMode::kLeft && kFillMode == FillMode::kLower) { + + int k_iterations_till_diagonal = ((threadblock_tile_offset.m() + 1) * Mma::Shape::kM + Mma::Shape::kK - 1) / Mma::Shape::kK; + if (k_iterations_till_diagonal < gemm_k_iterations) { + gemm_k_iterations = k_iterations_till_diagonal; + } + + } else if (kSideMode == SideMode::kRight && kFillMode == FillMode::kUpper) { + + int k_iterations_till_diagonal = ((threadblock_tile_offset.n() + 1) * Mma::Shape::kN + Mma::Shape::kK - 1) / Mma::Shape::kK; + if (k_iterations_till_diagonal < gemm_k_iterations) { + gemm_k_iterations = k_iterations_till_diagonal; + } + + } else if (kSideMode == SideMode::kLeft && kFillMode == FillMode::kUpper) { + + int k_iterations_till_diagonal = ((threadblock_tile_offset.m()) * Mma::Shape::kM) / Mma::Shape::kK; + + if (k_iterations_till_diagonal != 0) { + tb_offset_A += cutlass::MatrixCoord({0, k_iterations_till_diagonal * Mma::Shape::kK}); + tb_offset_B += cutlass::MatrixCoord({k_iterations_till_diagonal * Mma::Shape::kK, 0}); + gemm_k_iterations -= k_iterations_till_diagonal; + } + + } else if (kSideMode == SideMode::kRight && kFillMode == FillMode::kLower) { + + int k_iterations_till_diagonal = ((threadblock_tile_offset.n()) * Mma::Shape::kN) / Mma::Shape::kK; + + if (k_iterations_till_diagonal != 0) { + tb_offset_A += cutlass::MatrixCoord({0, k_iterations_till_diagonal * Mma::Shape::kK}); + tb_offset_B += cutlass::MatrixCoord({k_iterations_till_diagonal * Mma::Shape::kK, 0}); + gemm_k_iterations -= k_iterations_till_diagonal; + } + + } + + // Construct iterators to A and B operands + typename Mma::IteratorA iterator_A( + params.params_A, + ptr_A, + {params.problem_size.m(), problem_size_k}, + thread_idx, + tb_offset_A); + + typename Mma::IteratorB iterator_B( + params.params_B, + ptr_B, + {problem_size_k, params.problem_size.n()}, + thread_idx, + tb_offset_B); + + // Compute threadblock-scoped matrix multiply-add + mma( + gemm_k_iterations, + accumulators, + iterator_A, + iterator_B, + accumulators); + + // + // Epilogue + // + + EpilogueOutputOp output_op(params.output_op); + + // + // Masked tile iterators constructed from members + // + + threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + //assume identity swizzle + MatrixCoord threadblock_offset( + threadblock_tile_offset.m() * Mma::Shape::kM, + threadblock_tile_offset.n() * Mma::Shape::kN + ); + + int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m(); + + ElementC *ptr_D = static_cast(params.ptr_D); + + // + // Fetch pointers based on mode. + // + + // Construct the semaphore. + Semaphore semaphore(params.semaphore + block_idx, thread_idx); + + if (params.mode == GemmUniversalMode::kGemm) { + + // If performing a reduction via split-K, fetch the initial synchronization + if (params.grid_tiled_shape.k() > 1) { + + // Fetch the synchronization lock initially but do not block. 
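        // (Editor's addition, not in the original header.) Concrete illustration of the split-K
        // serialization around this semaphore, assuming grid_tiled_shape.k() == 4: the CTA with
        // k == 0 computes its partial sum and, after the epilogue, releases lock value 1; CTAs
        // with k == 1 and k == 2 wait on lock values 1 and 2, read the running partial result
        // through iterator_C (redirected to the 'D' tensor), accumulate, and release 2 and 3;
        // the final CTA (k == 3) waits on 3 and then resets the lock to 0 so the semaphore is
        // ready for a subsequent launch.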
+ semaphore.fetch(); + + // Indicate which position in a serial reduction the output operator is currently updating + output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k()); + } + } + else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) { + ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; + } + else if (params.mode == GemmUniversalMode::kBatched) { + ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; + } + else if (params.mode == GemmUniversalMode::kArray) { + ptr_D = static_cast(params.ptr_D)[threadblock_tile_offset.k()]; + } + + + // Tile iterator loading from source tensor (although irrelevant to this kernel as beta is zero). + typename Epilogue::OutputTileIterator iterator_C( + params.params_D, + ptr_D, + params.problem_size.mn(), + thread_idx, + threadblock_offset + ); + + // Tile iterator writing to destination tensor. + typename Epilogue::OutputTileIterator iterator_D( + params.params_D, + ptr_D, + params.problem_size.mn(), + thread_idx, + threadblock_offset + ); + + Epilogue epilogue( + shared_storage.epilogue, + thread_idx, + warp_idx, + lane_idx); + + // Wait on the semaphore - this latency may have been covered by iterator construction + if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { + + // For subsequent threadblocks, the source matrix is held in the 'D' tensor. + if (threadblock_tile_offset.k()) { + iterator_C = iterator_D; + } + + semaphore.wait(threadblock_tile_offset.k()); + + __threadfence(); + } + + + // Execute the epilogue operator to update the destination tensor. + epilogue( + output_op, + iterator_D, + accumulators, + iterator_C); + + // + // Release the semaphore + // + + if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { + + int lock = 0; + if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) { + + // The final threadblock resets the semaphore for subsequent grids. + lock = 0; + } + else { + // Otherwise, the semaphore is incremented + lock = threadblock_tile_offset.k() + 1; + } + + semaphore.release(lock); + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_ell_mma.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_ell_mma.h new file mode 100644 index 0000000000000000000000000000000000000000..7e4d765026a5a611b2296fdc00aaa9563c3695b6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_ell_mma.h @@ -0,0 +1,734 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Default template for a Blocked-Ell MMA. +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/numeric_types.h" +#include "cutlass/arch/arch.h" +#include "cutlass/arch/wmma.h" + +#include "cutlass/layout/matrix.h" +#include "cutlass/transform/threadblock/predicated_tile_iterator.h" +#include "cutlass/transform/threadblock/predicated_tile_iterator_2dthreadtile.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/threadblock/default_mma_core_simt.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm70.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm75.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm80.h" + +#if defined(CUTLASS_ARCH_WMMA_ENABLED) +#include "cutlass/gemm/threadblock/default_mma_core_wmma.h" +#endif //CUTLASS_ARCH_WMMA_ENABLED + +#include "cutlass/gemm/threadblock/ell_mma_pipelined.h" +#include "cutlass/gemm/threadblock/ell_mma_multistage.h" +#include "cutlass/transform/threadblock/ell_predicated_tile_iterator.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +template < + /// Element type for A matrix operand + typename ElementA_, + /// Layout type for A matrix operand + typename LayoutA_, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB_, + /// Layout type for B matrix operand + typename LayoutB_, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for internal accumulation + typename ElementAccumulator_, + /// Layout type for C and D matrix operands + typename LayoutC_, + /// Operator class tag + typename OperatorClass_, + /// Tag indicating architecture to tune for + typename ArchTag_, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape_, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape_, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape_, + /// Number of stages used in the pipelined mainloop + int Stages, + 
/// Operation perfomed by GEMM + typename Operator, + /// Store the accumulators in row major or column major. Row major is used + /// when output layout is interleaved. + bool AccumulatorsInRowMajor = false + > +struct DefaultEllMma; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization for row-major output (OperatorClass Simt) +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape, + /// Operation performed by GEMM + typename Operator> +struct DefaultEllMma { + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementAccumulator, layout::RowMajor, + arch::OpClassSimt, 2, Operator>; + + // Define iterators over tiles from the A operand + using IteratorA = + cutlass::transform::threadblock::EllPredicatedTileIterator< + cutlass::MatrixShape, + ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, kAlignmentA>; + + // Define iterators over tiles from the B operand + using IteratorB = + cutlass::transform::threadblock::EllPredicatedTileIterator< + cutlass::MatrixShape, + ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, kAlignmentB>; + + // Define the threadblock-scoped pipelined matrix multiply + using ThreadblockMma = cutlass::gemm::threadblock::EllMmaPipelined< + typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, + IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator, + layout::RowMajor, typename MmaCore::MmaPolicy>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization for row-major output (OperatorClass TensorOp) +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape, + /// Operation performed by GEMM + typename Operator + > +struct DefaultEllMma { + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementAccumulator, 
layout::RowMajor, + arch::OpClassTensorOp, 2, Operator>; + + // Define iterators over tiles from the A operand + using IteratorA = + cutlass::transform::threadblock::EllPredicatedTileIterator< + cutlass::MatrixShape, + ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, kAlignmentA>; + + // Define iterators over tiles from the B operand + using IteratorB = + cutlass::transform::threadblock::EllPredicatedTileIterator< + cutlass::MatrixShape, + ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, kAlignmentB>; + + // Define the threadblock-scoped pipelined matrix multiply + using ThreadblockMma = cutlass::gemm::threadblock::EllMmaPipelined< + typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, + IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator, + layout::RowMajor, typename MmaCore::MmaPolicy>; +}; + +//////////////////////////////////////////////////////////////////////////////// +/// Specialization for row-major output (OperatorClass TensorOp) +template < + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape, + /// Operation performed by GEMM + typename Operator + > +struct DefaultEllMma { + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, float, LayoutA, float, + LayoutB, float, layout::RowMajor, arch::OpClassTensorOp, 2, + arch::OpMultiplyAddFastF16>; + + // Define iterators over tiles from the A operand + using IteratorA = + cutlass::transform::threadblock::EllPredicatedTileIterator< + cutlass::MatrixShape, + float, LayoutA, 1, typename MmaCore::IteratorThreadMapA, kAlignmentA>; + + // Define iterators over tiles from the B operand + using IteratorB = + cutlass::transform::threadblock::EllPredicatedTileIterator< + cutlass::MatrixShape, + float, LayoutB, 0, typename MmaCore::IteratorThreadMapB, kAlignmentB>; + + // Define the threadblock-scoped pipelined matrix multiply + using ThreadblockMma = cutlass::gemm::threadblock::EllMmaPipelined< + typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, + IteratorB, typename MmaCore::SmemIteratorB, float, + layout::RowMajor, typename MmaCore::MmaPolicy>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization for column-major-interleaved output +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Tag indicating architecture to tune for + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename 
ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape, + /// Operation performed by GEMM + typename Operator, + /// Number of Interleaved K + int InterleavedK> +struct DefaultEllMma, OperatorClass, + ArchTag, ThreadblockShape, WarpShape, InstructionShape, 2, + Operator, true> { + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementAccumulator, + layout::ColumnMajorInterleaved, OperatorClass, 2, Operator, + true>; + + static_assert(kAlignmentA == 128 / sizeof_bits::value, + "Alignment must match thread data map's vector length"); + + static_assert(kAlignmentB ==128 / sizeof_bits::value, + "Alignment must match thread data map's vector length"); + + // Define iterators over tiles from the A operand + using IteratorA = cutlass::transform::threadblock::EllPredicatedTileIterator< + cutlass::MatrixShape, ElementA, + LayoutA, 1, typename MmaCore::IteratorThreadMapA>; + + // Define iterators over tiles from the B operand + using IteratorB = cutlass::transform::threadblock::EllPredicatedTileIterator< + cutlass::MatrixShape, ElementB, + LayoutB, 0, typename MmaCore::IteratorThreadMapB>; + + // Define the threadblock-scoped pipelined matrix multiply + using ThreadblockMma = cutlass::gemm::threadblock::EllMmaPipelined< + typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, + IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator, + layout::ColumnMajorInterleaved, + typename MmaCore::MmaPolicy>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization for row-major output +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape, + /// Number of stages used in the multistage mainloop + int Stages, + /// Operation perfomed by GEMM + typename Operator + > +struct DefaultEllMma { + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementAccumulator, layout::RowMajor, arch::OpClassSimt, + Stages, Operator>; + + // Define iterators over tiles from the A operand + using ThreadMapA = typename MmaCore::IteratorThreadMapA; + using AccessTypeA = cutlass::Array; + using IteratorA = + cutlass::transform::threadblock::EllPredicatedTileAccessIterator< + cutlass::MatrixShape, + ElementA, LayoutA, 1, ThreadMapA, AccessTypeA>; + + // Define iterators over tiles from the B operand + using ThreadMapB = typename MmaCore::IteratorThreadMapB; + using AccessTypeB = cutlass::Array; + using IteratorB = + 
cutlass::transform::threadblock::EllPredicatedTileAccessIterator< + cutlass::MatrixShape, + ElementB, LayoutB, 0, ThreadMapB, AccessTypeB>; + + // Define the threadblock-scoped multistage matrix multiply + using ThreadblockMma = cutlass::gemm::threadblock::EllMmaMultistage< + typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, + MmaCore::kCacheOpA, IteratorB, typename MmaCore::SmemIteratorB, + MmaCore::kCacheOpB, ElementAccumulator, layout::RowMajor, + typename MmaCore::MmaPolicy, Stages>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization for row-major output (OperatorClass TensorOp) +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape, + /// Number of stages used in the multistage mainloop + int Stages, + /// Operation perfomed by GEMM + typename Operator + > +struct DefaultEllMma { + static cutlass::arch::CacheOperation::Kind const CacheOpA = + ((sizeof_bits::value * kAlignmentA) == 128) + ? cutlass::arch::CacheOperation::Global + : cutlass::arch::CacheOperation::Always; + + static cutlass::arch::CacheOperation::Kind const CacheOpB = + ((sizeof_bits::value * kAlignmentB) == 128) + ? 
cutlass::arch::CacheOperation::Global + : cutlass::arch::CacheOperation::Always; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, + Stages, Operator, false, CacheOpA, CacheOpB>; + + // Define iterators over tiles from the A operand + using ThreadMapA = typename MmaCore::IteratorThreadMapA; + using AccessTypeA = cutlass::Array; + using IteratorA = + cutlass::transform::threadblock::EllPredicatedTileAccessIterator< + cutlass::MatrixShape, + ElementA, LayoutA, 1, ThreadMapA, AccessTypeA>; + + // Define iterators over tiles from the B operand + using ThreadMapB = typename MmaCore::IteratorThreadMapB; + using AccessTypeB = cutlass::Array; + using IteratorB = + cutlass::transform::threadblock::EllPredicatedTileAccessIterator< + cutlass::MatrixShape, + ElementB, LayoutB, 0, ThreadMapB, AccessTypeB>; + + // Define the threadblock-scoped multistage matrix multiply + using ThreadblockMma = cutlass::gemm::threadblock::EllMmaMultistage< + typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, + MmaCore::kCacheOpA, IteratorB, typename MmaCore::SmemIteratorB, + MmaCore::kCacheOpB, ElementAccumulator, layout::RowMajor, + typename MmaCore::MmaPolicy, Stages>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization for column-major-interleaved output +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Tag indicating architecture to tune for + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape, + /// Number of stages used in the multistage mainloop + int Stages, + /// Operation performed by GEMM + typename Operator, + /// Number of Interleaved K + int InterleavedK> +struct DefaultEllMma, OperatorClass, + ArchTag, ThreadblockShape, WarpShape, InstructionShape, + Stages, Operator, true> { + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementAccumulator, + layout::ColumnMajorInterleaved, OperatorClass, Stages, + Operator, true>; + + // Define iterators over tiles from the A operand + using ThreadMapA = typename MmaCore::IteratorThreadMapA; + using AccessTypeA = cutlass::Array; + using IteratorA = + cutlass::transform::threadblock::EllPredicatedTileAccessIterator< + cutlass::MatrixShape, + ElementA, LayoutA, 1, ThreadMapA, AccessTypeA>; + + // Define iterators over tiles from the B operand + using ThreadMapB = typename MmaCore::IteratorThreadMapB; + using AccessTypeB = cutlass::Array; + using IteratorB = + cutlass::transform::threadblock::EllPredicatedTileAccessIterator< + cutlass::MatrixShape, 
+ ElementB, LayoutB, 0, ThreadMapB, AccessTypeB>; + + // Define the threadblock-scoped multistage matrix multiply + using ThreadblockMma = cutlass::gemm::threadblock::EllMmaMultistage< + typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, + MmaCore::kCacheOpA, IteratorB, typename MmaCore::SmemIteratorB, + MmaCore::kCacheOpB, ElementAccumulator, layout::RowMajor, + typename MmaCore::MmaPolicy, Stages>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization for SIMT IDP4A Kernels +template < + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Operation performed by GEMM + typename Operator, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape> +struct DefaultEllMma, 2, + Operator, false> { + using InstructionShape = GemmShape<1, 1, 4>; + using ElementA = int8_t; + using ElementB = int8_t; + using OperatorClass = arch::OpClassSimt; + + static const bool transposeA = cutlass::platform::is_same< LayoutA, layout::ColumnMajor >::value; + static const bool transposeB = cutlass::platform::is_same< LayoutB, layout::RowMajor >::value; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementAccumulator, layout::RowMajor, + OperatorClass, 2, Operator>; + + // Define iterators over tiles from the A operand + using IteratorA = + cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile< + cutlass::MatrixShape, + ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, transposeA>; + + // Define iterators over tiles from the B operand + using IteratorB = + cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile< + cutlass::MatrixShape, + ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, transposeB>; + + // Define the threadblock-scoped pipelined matrix multiply + using ThreadblockMma = cutlass::gemm::threadblock::EllMmaPipelined< + typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, + IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator, + layout::RowMajor, typename MmaCore::MmaPolicy>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +#if defined(CUTLASS_ARCH_WMMA_ENABLED) +/// Specialization for Wmma TensorOp operator with 2 staged pipeline +template < + ///< Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Layout type for C and D matrix operands + typename LayoutC, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + 
/// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape, + /// Operation performed by GEMM + typename Operator> +struct DefaultEllMma { + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementAccumulator, LayoutC, + arch::OpClassWmmaTensorOp, 2, Operator>; + + // Define iterators over tiles from the A operand + using IteratorA = + cutlass::transform::threadblock::EllPredicatedTileIterator< + cutlass::MatrixShape, + ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, kAlignmentA>; + + // Define iterators over tiles from the B operand + using IteratorB = + cutlass::transform::threadblock::EllPredicatedTileIterator< + cutlass::MatrixShape, + ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, kAlignmentB>; + + // Define the threadblock-scoped pipelined matrix multiply + using ThreadblockMma = cutlass::gemm::threadblock::EllMmaPipelined< + typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, + IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator, + LayoutC, typename MmaCore::MmaPolicy>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization for Wmma TensorOp operator with 1 staged pipeline +template < + ///< Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Layout type for C and D matrix operands + typename LayoutC, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape, + /// Operation performed by GEMM + typename Operator> +struct DefaultEllMma { + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementAccumulator, LayoutC, + arch::OpClassWmmaTensorOp, 1, Operator>; + + // Define iterators over tiles from the A operand + using IteratorA = + cutlass::transform::threadblock::EllPredicatedTileIterator< + cutlass::MatrixShape, + ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, kAlignmentA>; + + // Define iterators over tiles from the B operand + using IteratorB = + cutlass::transform::threadblock::EllPredicatedTileIterator< + cutlass::MatrixShape, + ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, kAlignmentB>; + + // Define the threadblock-scoped singlestage matrix multiply + using ThreadblockMma = cutlass::gemm::threadblock::MmaSingleStage< + typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, + IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator, + LayoutC, typename MmaCore::MmaPolicy>; +}; + +//////////////////////////////////////////////////////////////////////////////// +#endif //CUTLASS_ARCH_WMMA_ENABLED + +} 
// namespace threadblock +} // namespace gemm +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_gemv_core.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_gemv_core.h new file mode 100644 index 0000000000000000000000000000000000000000..afb74e727c1a61be67ae77d6c6bc1ece30f76091 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_gemv_core.h @@ -0,0 +1,151 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Defines basic properties needed by CTA-level batched GEMV assuming expectations about data + layout of the global memory fragments, data types, and internal tile sizes. + + Partial specializations for threadblock::Mma operations targeting SIMT instructions. 
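    Editor's addition (not part of the original header): for illustration only, a batched GEMV
    core for single-precision data might be instantiated as

      using GemvCore = cutlass::gemm::threadblock::DefaultGemvCore<
          cutlass::gemm::GemmShape<1, 64, 4>,   // threadblock-level vector-matrix tile (M must be 1)
          cutlass::gemm::GemmShape<1, 4, 4>,    // per-thread tile
          float, cutlass::layout::RowMajor,     // operand A (the vector)
          float, cutlass::layout::RowMajor,     // operand B (the matrix)
          float, cutlass::layout::RowMajor>;    // accumulator / output

    which satisfies the static_asserts below (Shape::kM == 1, ThreadShape::kM == 1, Shape::kK a
    multiple of ThreadShape::kK) and yields the tile iterators and the thread-level SIMT Mma used
    by the threadblock-scoped GEMV. The shapes and layouts here are placeholders, not tuned values.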
+*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/array.h" +#include "cutlass/numeric_types.h" +#include "cutlass/matrix_shape.h" + +#include "cutlass/layout/matrix.h" + +#include "cutlass/platform/platform.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/thread/mma.h" + +#include "cutlass/transform/threadblock/predicated_tile_iterator.h" +#include "cutlass/transform/pitch_linear_thread_map.h" + +#include "cutlass/gemm/threadblock/gemv.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// +namespace cutlass { +namespace gemm { +namespace threadblock { + +/// Template defininng default vector-matrix multiply operators inferred from threadblock tile size, +/// global memory data layout. +template < + typename Shape_, /// Shape of the threadblock vector-matrix multiply operator + typename ThreadShape_, /// Shape of per-thread vector-matrix multiply operator + typename ElementA_, /// Element data type of A operand + typename LayoutA_, /// Layout of operand A + typename ElementB_, /// Element data type of B operand + typename LayoutB_, /// Layout of operand B + typename ElementC_, /// Data type of accumulator + typename LayoutC_ /// Layout of accumulator +> +struct DefaultGemvCore { + + using Shape = Shape_; + using ThreadShape = ThreadShape_; + + using LayoutA = LayoutA_; + using LayoutB = LayoutB_; + using LayoutC = LayoutC_; + + using ElementA = ElementA_; + using ElementB = ElementB_; + using ElementC = ElementC_; + + static int const kThreadsPerN = Shape::kN / ThreadShape::kN; + + using IteratorPolicyA = typename platform::conditional< + platform::is_same::value, + cutlass::transform::PitchLinearTilePolicyStripminedThreadContiguous< + layout::PitchLinearShape, 1, ThreadShape::kK>, + cutlass::transform::PitchLinearTilePolicyStripminedThreadStrided< + layout::PitchLinearShape, 1, ThreadShape::kM>>::type; + + using IteratorA = cutlass::transform::threadblock::PredicatedTileIterator< + cutlass::MatrixShape, ElementA, LayoutA, 1, IteratorPolicyA>; + + using IteratorPolicyB = typename platform::conditional< + platform::is_same::value, + cutlass::transform::PitchLinearTilePolicyStripminedThreadContiguous< + layout::PitchLinearShape, kThreadsPerN, ThreadShape::kN>, + cutlass::transform::PitchLinearTilePolicyStripminedThreadStrided< + layout::PitchLinearShape, kThreadsPerN, ThreadShape::kK>>::type; + + using IteratorB = cutlass::transform::threadblock::PredicatedTileIterator< + cutlass::MatrixShape, ElementB, LayoutB, 0, IteratorPolicyB>; + + using IteratorPolicyC = typename platform::conditional< + platform::is_same::value, + cutlass::transform::PitchLinearTilePolicyStripminedThreadContiguous< + layout::PitchLinearShape, kThreadsPerN, ThreadShape::kN>, + cutlass::transform::PitchLinearTilePolicyStripminedThreadStrided< + layout::PitchLinearShape, kThreadsPerN, ThreadShape::kM>>::type; + + using IteratorC = cutlass::transform::threadblock::PredicatedTileIterator< + cutlass::MatrixShape, ElementC, LayoutC, 0, IteratorPolicyC>; + + using MmaSimtOp = typename cutlass::gemm::thread::Mma< + cutlass::gemm::GemmShape, + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC>; + + using Operator = MmaSimtOp; + + // Assertions for correctness + static_assert((Shape::kM == 1), "M=1 is required for GEMV"); + + static_assert((ThreadShape::kM == 1), "M=1 is required for GEMV"); + + static_assert(Shape::kK % ThreadShape::kK == 0, "Shape::K must be a multiple of ThreadShape::K"); + + static_assert(((ThreadShape::kK == 1) 
|| + (ThreadShape::kK == 2) || + (ThreadShape::kK == 4) || + (ThreadShape::kK == 8) || + (ThreadShape::kK == 16) || + (ThreadShape::kK == 32) + ), + "ThreadShape::K must be a 1, 2, 4, 8, 16 or 32"); +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma.h new file mode 100644 index 0000000000000000000000000000000000000000..69e3f0d236c9915e1a7cef6f881e1f6107d0a41c --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma.h @@ -0,0 +1,823 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Template for a pipelined GEMM kernel. Does not compute batching or support split-K. 
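    Editor's addition (not part of the original header): for illustration only, an SM80 Tensor Op
    mainloop could be obtained from this template roughly as

      using ThreadblockMma = typename cutlass::gemm::threadblock::DefaultMma<
          cutlass::half_t, cutlass::layout::RowMajor, 8,      // A: element, layout, alignment
          cutlass::half_t, cutlass::layout::ColumnMajor, 8,   // B: element, layout, alignment
          float, cutlass::layout::RowMajor,                   // accumulator and C/D layout
          cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80,
          cutlass::gemm::GemmShape<128, 128, 32>,             // threadblock tile
          cutlass::gemm::GemmShape<64, 64, 32>,               // warp tile
          cutlass::gemm::GemmShape<16, 8, 16>,                // instruction shape
          3,                                                  // mainloop stages
          cutlass::arch::OpMultiplyAdd>::ThreadblockMma;

    leaving the trailing parameters (accumulator layout, shared-memory clear option, gather and
    permute hooks) at their defaults. The specific shapes and alignments are placeholders chosen
    only to match the parameter list declared below, not tuned values.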
+*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/numeric_types.h" +#include "cutlass/arch/arch.h" +#include "cutlass/arch/wmma.h" + +#include "cutlass/layout/matrix.h" +#include "cutlass/layout/permute.h" +#include "cutlass/transform/threadblock/predicated_tile_iterator.h" +#include "cutlass/transform/threadblock/predicated_tile_iterator_2dthreadtile.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/threadblock/default_mma_core_simt.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm70.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm75.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm80.h" + +#if defined(CUTLASS_ARCH_WMMA_ENABLED) +#include "cutlass/gemm/threadblock/default_mma_core_wmma.h" +#endif //CUTLASS_ARCH_WMMA_ENABLED + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +template < + /// Element type for A matrix operand + typename ElementA_, + /// Layout type for A matrix operand + typename LayoutA_, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB_, + /// Layout type for B matrix operand + typename LayoutB_, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for internal accumulation + typename ElementAccumulator_, + /// Layout type for C and D matrix operands + typename LayoutC_, + /// Operator class tag + typename OperatorClass_, + /// Tag indicating architecture to tune for + typename ArchTag_, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape_, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape_, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape_, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Operation perfomed by GEMM + typename Operator, + /// Store the accumulators in row major or column major. Row major is used + /// when output layout is interleaved. 
+ bool AccumulatorsInRowMajor = false, + /// Use zfill or predicate for out-of-bound cp.async + SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone, + /// Gather operand A by using an index array + bool GatherA = false, + /// Gather operand B by using an index array + bool GatherB = false, + /// Permute operand A + typename PermuteALayout = layout::NoPermute, + /// Permute operand B + typename PermuteBLayout = layout::NoPermute + > +struct DefaultMma; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization for row-major output (OperatorClass Simt) +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Layout type for C and D matrix operand + typename LayoutC, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape, + /// Operation performed by GEMM + typename Operator, + /// Gather operand A by using an index array + bool GatherA, + /// Gather operand B by using an index array + bool GatherB, + /// Permute operand A + typename PermuteALayout, + /// Permute operand B + typename PermuteBLayout + > +struct DefaultMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, + ElementAccumulator, LayoutC, arch::OpClassSimt, ArchTag, ThreadblockShape, + WarpShape, InstructionShape, 2, Operator, false, SharedMemoryClearOption::kNone, + GatherA, GatherB, PermuteALayout, PermuteBLayout> { + + static_assert(platform::is_same<LayoutC, layout::RowMajor>::value + || platform::is_same<LayoutC, layout::AffineRankN<2>>::value, + "simt epilogue must be row major"); + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementAccumulator, LayoutC, + arch::OpClassSimt, 2, Operator>; + + // Define iterators over tiles from the A operand + using IteratorA = + cutlass::transform::threadblock::PredicatedTileIterator< + cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, + ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, kAlignmentA, + GatherA, PermuteALayout>; + + // Define iterators over tiles from the B operand + using IteratorB = + cutlass::transform::threadblock::PredicatedTileIterator< + cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, + ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, kAlignmentB, + GatherB, PermuteBLayout>; + + // Define the threadblock-scoped pipelined matrix multiply + using ThreadblockMma = cutlass::gemm::threadblock::MmaPipelined< + typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, + IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator, + LayoutC, typename MmaCore::MmaPolicy>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization for row-major output (OperatorClass TensorOp) +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// 
Element type for internal accumulation + typename ElementAccumulator, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape, + /// Operation performed by GEMM + typename Operator, + /// Use zfill or predicate for out-of-bound cp.async + SharedMemoryClearOption SharedMemoryClear, + /// Gather operand A by using an index array + bool GatherA, + /// Gather operand B by using an index array + bool GatherB, + /// Permute operand A + typename PermuteALayout, + /// Permute operand B + typename PermuteBLayout + > +struct DefaultMma { + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementAccumulator, layout::RowMajor, + arch::OpClassTensorOp, 2, Operator>; + + // Define iterators over tiles from the A operand + using IteratorA = + cutlass::transform::threadblock::PredicatedTileIterator< + cutlass::MatrixShape, + ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, kAlignmentA, + GatherA, PermuteALayout>; + + // Define iterators over tiles from the B operand + using IteratorB = + cutlass::transform::threadblock::PredicatedTileIterator< + cutlass::MatrixShape, + ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, kAlignmentB, + GatherB, PermuteBLayout>; + + // Define the threadblock-scoped pipelined matrix multiply + using ThreadblockMma = cutlass::gemm::threadblock::MmaPipelined< + typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, + IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator, + layout::RowMajor, typename MmaCore::MmaPolicy>; +}; + +//////////////////////////////////////////////////////////////////////////////// +/// Specialization for row-major output (OperatorClass TensorOp) +template < + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape, + /// Operation performed by GEMM + typename Operator, + /// Gather operand A by using an index array + bool GatherA, + /// Gather operand B by using an index array + bool GatherB, + /// Permute operand A + typename PermuteALayout, + /// Permute operand B + typename PermuteBLayout + > +struct DefaultMma { + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, float, LayoutA, float, + LayoutB, float, layout::RowMajor, arch::OpClassTensorOp, 2, + arch::OpMultiplyAddFastF16>; + + // Define iterators over tiles from the A operand + using IteratorA = + cutlass::transform::threadblock::PredicatedTileIterator< + cutlass::MatrixShape, + float, LayoutA, 1, typename MmaCore::IteratorThreadMapA, kAlignmentA, + GatherA, PermuteALayout>; + + // Define iterators over tiles from the B operand + using IteratorB = + 
cutlass::transform::threadblock::PredicatedTileIterator< + cutlass::MatrixShape, + float, LayoutB, 0, typename MmaCore::IteratorThreadMapB, kAlignmentB, + GatherB, PermuteBLayout>; + + // Define the threadblock-scoped pipelined matrix multiply + using ThreadblockMma = cutlass::gemm::threadblock::MmaPipelined< + typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, + IteratorB, typename MmaCore::SmemIteratorB, float, + layout::RowMajor, typename MmaCore::MmaPolicy>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization for column-major-interleaved output +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Tag indicating architecture to tune for + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape, + /// Operation performed by GEMM + typename Operator, + /// Number of Interleaved K + int InterleavedK> +struct DefaultMma, OperatorClass, + ArchTag, ThreadblockShape, WarpShape, InstructionShape, 2, + Operator, true, SharedMemoryClearOption::kNone, false, false, + layout::NoPermute, layout::NoPermute> { + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementAccumulator, + layout::ColumnMajorInterleaved, OperatorClass, 2, Operator, + true>; + + static_assert(kAlignmentA == 128 / sizeof_bits::value, + "Alignment must match thread data map's vector length"); + + static_assert(kAlignmentB ==128 / sizeof_bits::value, + "Alignment must match thread data map's vector length"); + + // Define iterators over tiles from the A operand + using IteratorA = cutlass::transform::threadblock::PredicatedTileIterator< + cutlass::MatrixShape, ElementA, + LayoutA, 1, typename MmaCore::IteratorThreadMapA>; + + // Define iterators over tiles from the B operand + using IteratorB = cutlass::transform::threadblock::PredicatedTileIterator< + cutlass::MatrixShape, ElementB, + LayoutB, 0, typename MmaCore::IteratorThreadMapB>; + + // Define the threadblock-scoped pipelined matrix multiply + using ThreadblockMma = cutlass::gemm::threadblock::MmaPipelined< + typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, + IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator, + layout::ColumnMajorInterleaved, + typename MmaCore::MmaPolicy>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization for row-major output +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// 
Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Layout type for C and D matrix operand + typename LayoutC, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape, + /// Number of stages used in the multistage mainloop + int Stages, + /// Operation perfomed by GEMM + typename Operator, + /// Gather operand A by using an index array + bool GatherA, + /// Gather operand B by using an index array + bool GatherB, + /// Permute operand A + typename PermuteALayout, + /// Permute operand B + typename PermuteBLayout + > +struct DefaultMma { + + static_assert(platform::is_same::value + || platform::is_same>::value, + "simt epilogue must be row major"); + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementAccumulator, LayoutC, arch::OpClassSimt, + Stages, Operator>; + + // Define iterators over tiles from the A operand + using ThreadMapA = typename MmaCore::IteratorThreadMapA; + using AccessTypeA = cutlass::Array; + using IteratorA = + cutlass::transform::threadblock::PredicatedTileAccessIterator< + cutlass::MatrixShape, + ElementA, LayoutA, 1, ThreadMapA, AccessTypeA, GatherA, PermuteALayout>; + + // Define iterators over tiles from the B operand + using ThreadMapB = typename MmaCore::IteratorThreadMapB; + using AccessTypeB = cutlass::Array; + using IteratorB = + cutlass::transform::threadblock::PredicatedTileAccessIterator< + cutlass::MatrixShape, + ElementB, LayoutB, 0, ThreadMapB, AccessTypeB, GatherB, PermuteBLayout>; + + // Define the threadblock-scoped multistage matrix multiply + using ThreadblockMma = cutlass::gemm::threadblock::MmaMultistage< + typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, + MmaCore::kCacheOpA, IteratorB, typename MmaCore::SmemIteratorB, + MmaCore::kCacheOpB, ElementAccumulator, LayoutC, + typename MmaCore::MmaPolicy, Stages>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization for row-major output (OperatorClass TensorOp) +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Layout type for C and D matrix operand + typename LayoutC, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape, + /// Number of stages used in the multistage mainloop + int Stages, + /// Operation perfomed by GEMM + typename Operator, + /// Use zfill or predicate for out-of-bound cp.async + SharedMemoryClearOption SharedMemoryClear, + /// Gather operand A 
by using an index array + bool GatherA, + /// Gather operand B by using an index array + bool GatherB, + /// Permute operand A + typename PermuteALayout, + /// Permute operand B + typename PermuteBLayout + > +struct DefaultMma { + + static_assert(platform::is_same::value + || platform::is_same>::value, + "simt epilogue must be row major"); + + static cutlass::arch::CacheOperation::Kind const CacheOpA = + ((sizeof_bits::value * kAlignmentA) == 128) + ? cutlass::arch::CacheOperation::Global + : cutlass::arch::CacheOperation::Always; + + static cutlass::arch::CacheOperation::Kind const CacheOpB = + ((sizeof_bits::value * kAlignmentB) == 128) + ? cutlass::arch::CacheOperation::Global + : cutlass::arch::CacheOperation::Always; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementAccumulator, LayoutC, arch::OpClassTensorOp, + Stages, Operator, false, CacheOpA, CacheOpB>; + + // Define iterators over tiles from the A operand + using ThreadMapA = typename MmaCore::IteratorThreadMapA; + using AccessTypeA = cutlass::Array; + using IteratorA = + cutlass::transform::threadblock::PredicatedTileAccessIterator< + cutlass::MatrixShape, + ElementA, LayoutA, 1, ThreadMapA, AccessTypeA, GatherA, PermuteALayout>; + + // Define iterators over tiles from the B operand + using ThreadMapB = typename MmaCore::IteratorThreadMapB; + using AccessTypeB = cutlass::Array; + using IteratorB = + cutlass::transform::threadblock::PredicatedTileAccessIterator< + cutlass::MatrixShape, + ElementB, LayoutB, 0, ThreadMapB, AccessTypeB, GatherB, PermuteBLayout>; + + // Define the threadblock-scoped multistage matrix multiply + using ThreadblockMma = cutlass::gemm::threadblock::MmaMultistage< + typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, + MmaCore::kCacheOpA, IteratorB, typename MmaCore::SmemIteratorB, + MmaCore::kCacheOpB, ElementAccumulator, LayoutC, + typename MmaCore::MmaPolicy, Stages, SharedMemoryClear>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization for column-major-interleaved output +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Tag indicating architecture to tune for + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape, + /// Number of stages used in the multistage mainloop + int Stages, + /// Operation performed by GEMM + typename Operator, + /// Number of Interleaved K + int InterleavedK> +struct DefaultMma, OperatorClass, + ArchTag, ThreadblockShape, WarpShape, InstructionShape, + Stages, Operator, true, SharedMemoryClearOption::kNone, + false, false, layout::NoPermute, layout::NoPermute> { + // Define the MmaCore components + using MmaCore = typename 
cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementAccumulator, + layout::ColumnMajorInterleaved, OperatorClass, Stages, + Operator, true>; + + // Define iterators over tiles from the A operand + using ThreadMapA = typename MmaCore::IteratorThreadMapA; + using AccessTypeA = cutlass::Array; + using IteratorA = + cutlass::transform::threadblock::PredicatedTileAccessIterator< + cutlass::MatrixShape, + ElementA, LayoutA, 1, ThreadMapA, AccessTypeA>; + + // Define iterators over tiles from the B operand + using ThreadMapB = typename MmaCore::IteratorThreadMapB; + using AccessTypeB = cutlass::Array; + using IteratorB = + cutlass::transform::threadblock::PredicatedTileAccessIterator< + cutlass::MatrixShape, + ElementB, LayoutB, 0, ThreadMapB, AccessTypeB>; + + // Define the threadblock-scoped multistage matrix multiply + using ThreadblockMma = cutlass::gemm::threadblock::MmaMultistage< + typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, + MmaCore::kCacheOpA, IteratorB, typename MmaCore::SmemIteratorB, + MmaCore::kCacheOpB, ElementAccumulator, layout::RowMajor, + typename MmaCore::MmaPolicy, Stages>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization for SIMT IDP4A Kernels +template < + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Operation performed by GEMM + typename Operator, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape> +struct DefaultMma, 2, + Operator, false, SharedMemoryClearOption::kNone, + false, false, layout::NoPermute, layout::NoPermute> { + using InstructionShape = GemmShape<1, 1, 4>; + using ElementA = int8_t; + using ElementB = int8_t; + using OperatorClass = arch::OpClassSimt; + + static const bool transposeA = platform::is_same< LayoutA, layout::ColumnMajor >::value; + static const bool transposeB = platform::is_same< LayoutB, layout::RowMajor >::value; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementAccumulator, layout::RowMajor, + OperatorClass, 2, Operator>; + + // Define iterators over tiles from the A operand + using IteratorA = + cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile< + cutlass::MatrixShape, + ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, transposeA>; + + // Define iterators over tiles from the B operand + using IteratorB = + cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile< + cutlass::MatrixShape, + ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, transposeB>; + + // Define the threadblock-scoped pipelined matrix multiply + using ThreadblockMma = cutlass::gemm::threadblock::MmaPipelined< + typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, + IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator, + layout::RowMajor, typename MmaCore::MmaPolicy>; +}; + 
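As a usage sketch (not taken from the upstream header), an instantiation along the following lines would select the SIMT IDP4A specialization defined above; the alias name, the Sm61 arch tag, and the tile shapes here are illustrative assumptions rather than values this file prescribes:

// Hypothetical instantiation selecting the int8 DP4A SIMT specialization above.
// The alias name, arch tag (Sm61), and tile shapes are assumptions for illustration only.
using DefaultMmaS8Dp4a = cutlass::gemm::threadblock::DefaultMma<
    int8_t,  cutlass::layout::RowMajor,    4,   // A: int8, row-major, 4-element accesses
    int8_t,  cutlass::layout::ColumnMajor, 4,   // B: int8, column-major, 4-element accesses
    int32_t, cutlass::layout::RowMajor,         // int32 accumulation, row-major C/D
    cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
    cutlass::gemm::GemmShape<128, 128, 32>,     // threadblock tile
    cutlass::gemm::GemmShape<32, 64, 32>,       // warp tile
    cutlass::gemm::GemmShape<1, 1, 4>,          // dp4a instruction shape
    2, cutlass::arch::OpMultiplyAdd>;           // two pipeline stages
// DefaultMmaS8Dp4a::ThreadblockMma is the resulting threadblock-scoped MmaPipelined;
// ::IteratorA / ::IteratorB are its 2d-thread-tile global-memory iterators.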
+//////////////////////////////////////////////////////////////////////////////// + +#if defined(CUTLASS_ARCH_WMMA_ENABLED) +/// Specialization for Wmma TensorOp operator with 2 staged pipeline +template < + ///< Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Layout type for C and D matrix operands + typename LayoutC, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape, + /// Operation performed by GEMM + typename Operator> +struct DefaultMma { + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementAccumulator, LayoutC, + arch::OpClassWmmaTensorOp, 2, Operator>; + + // Define iterators over tiles from the A operand + using IteratorA = + cutlass::transform::threadblock::PredicatedTileIterator< + cutlass::MatrixShape, + ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, kAlignmentA>; + + // Define iterators over tiles from the B operand + using IteratorB = + cutlass::transform::threadblock::PredicatedTileIterator< + cutlass::MatrixShape, + ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, kAlignmentB>; + + // Define the threadblock-scoped pipelined matrix multiply + using ThreadblockMma = cutlass::gemm::threadblock::MmaPipelined< + typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, + IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator, + LayoutC, typename MmaCore::MmaPolicy>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization for Wmma TensorOp operator with 1 staged pipeline +template < + ///< Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Layout type for C and D matrix operands + typename LayoutC, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape, + /// Operation performed by GEMM + typename Operator> +struct DefaultMma { + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementAccumulator, LayoutC, + arch::OpClassWmmaTensorOp, 1, Operator>; + + // Define iterators over tiles 
from the A operand + using IteratorA = + cutlass::transform::threadblock::PredicatedTileIterator< + cutlass::MatrixShape, + ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, kAlignmentA>; + + // Define iterators over tiles from the B operand + using IteratorB = + cutlass::transform::threadblock::PredicatedTileIterator< + cutlass::MatrixShape, + ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, kAlignmentB>; + + // Define the threadblock-scoped singlestage matrix multiply + using ThreadblockMma = cutlass::gemm::threadblock::MmaSingleStage< + typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, + IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator, + LayoutC, typename MmaCore::MmaPolicy>; +}; + +//////////////////////////////////////////////////////////////////////////////// +#endif //CUTLASS_ARCH_WMMA_ENABLED + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_core.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_core.h new file mode 100644 index 0000000000000000000000000000000000000000..3d7ffe95db94b45c5d640bb97c90797801be91b7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_core.h @@ -0,0 +1,116 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Defines basic properties needed by CTA-level GEMMs assuming expectations about data + layout of the global memory fragments, data types, and internal tile sizes. + + Partial specializations for threadblock::Mma operations targeting TensorOp instructions. +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/array.h" + +#include "cutlass/numeric_types.h" +#include "cutlass/matrix_shape.h" + +#include "cutlass/gemm/warp/mma.h" +#include "cutlass/gemm/threadblock/mma_pipelined.h" +#include "cutlass/gemm/threadblock/mma_singlestage.h" +#include "cutlass/arch/cache_operation.h" +#include "cutlass/arch/mma.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace threadblock { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Template defining default matrix multiply operators inferred from threadblock tile size, +/// global memory data layout, and target math instruction. +template < + /// Shape of threadblock-scoped matrix multiply operator + typename Shape, + /// Shape of warp-level matrix multiply operator + typename WarpShape, + /// Shape of one matrix product operation (concept: GemmShape) + typename InstructionShape, + /// Element data type of A operand + typename ElementA, + /// Layout of operand A + typename LayoutA, + /// Element data type of B operand + typename ElementB, + /// Layout of operand B + typename LayoutB, + /// Data type of accumulator + typename ElementC, + /// Layout of accumulator + typename LayoutC, + /// Indicates type of math operator (arch::OpClassSimt or arch::OpClassTensorOp) + typename OperatorClass, + /// Number of stages + int Stages = 2, + /// Operation performed by MMA + typename Operator = typename platform::conditional< + (platform::is_same<OperatorClass, cutlass::arch::OpClassTensorOp>::value) && + (platform::is_same<ElementA, int8_t>::value || + platform::is_same<ElementA, int4b_t>::value || + platform::is_same<ElementA, uint8_t>::value || + platform::is_same<ElementA, uint4b_t>::value), + cutlass::arch::OpMultiplyAddSaturate, + cutlass::arch::OpMultiplyAdd>::type, + /// Store the accumulators in row major or column major. Row major is used + /// when output layout is interleaved. 
+ bool AccumulatorsInRowMajor = false, + /// Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA = + cutlass::arch::CacheOperation::Global, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB = + cutlass::arch::CacheOperation::Global, + /// per-element transformation for elements of A + ComplexTransform TransformA = ComplexTransform::kNone, + /// per-element transformation for elements of B + ComplexTransform TransformB = ComplexTransform::kNone, + bool IsComplex = false // (is_complex::value || is_complex::value) +> +struct DefaultMmaCore; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_core_simt.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_core_simt.h new file mode 100644 index 0000000000000000000000000000000000000000..a6d8ec044078ad87b147ed72fb31834f1616d2e9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_core_simt.h @@ -0,0 +1,1723 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Defines basic properties needed by CTA-level GEMMs assuming expectations about data + layout of the global memory fragments, data types, and internal tile sizes. + + Partial specializations for threadblock::Mma operations targeting simt instructions. 
+*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/array.h" +#include "cutlass/fast_math.h" + +#include "cutlass/numeric_types.h" +#include "cutlass/matrix_shape.h" + + +#include "cutlass/transform/pitch_linear_thread_map.h" +#include "cutlass/transform/threadblock/regular_tile_iterator_pitch_linear.h" +#include "cutlass/transform/threadblock/regular_tile_iterator_pitch_linear_2dthreadtile.h" + +#include "cutlass/gemm/warp/mma_simt_policy.h" +#include "cutlass/gemm/warp/mma_simt.h" +#include "cutlass/gemm/threadblock/default_mma_core.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace threadblock { + +namespace detail { + +// convert a WarpShape which is the whole tile of elements into warp num threads. +// The goal is for each thread's tile of elements to be as square as possible +// for performance (4x4 will be faster than 2x8). +template <typename WarpShape> +constexpr int simt_get_warp_threads_m() { + return (WarpShape::kM > WarpShape::kN) ? 8 : 4; +} + +/// Computes padding in shared memory to perform efficient transpose without bank conflicts. +constexpr int simt_transpose_padding(int threads, int crosswise, int size_in_bits) { + return (size_in_bits >= 32 ? + threads / crosswise / (size_in_bits / 32) : + threads / crosswise * (32 / size_in_bits) + ); +} + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization: +/// +/// A: column-major +/// B: row-major +/// Operator: simt class +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Data type of A operand + typename ElementA_, + /// Data type of B operand + typename ElementB_, + /// Data type of accumulator + typename ElementC_, + /// Layout of accumulator + typename LayoutC_, + /// Operation performed by GEMM + typename Operator_> +struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<1, 1, 1>, ElementA_, + layout::ColumnMajor, ElementB_, layout::RowMajor, + ElementC_, LayoutC_, arch::OpClassSimt, 2, Operator_ + > { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = GemmShape<1, 1, 1>; + using ElementA = ElementA_; + using LayoutA = layout::ColumnMajor; + using ElementB = ElementB_; + using LayoutB = layout::RowMajor; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + using OperatorClass = arch::OpClassSimt; + static int const PartitionsK = Shape::kK / WarpShape::kK; + + /// Default Operator + using Operator = Operator_; + + /// Number of warps present + using WarpCount = GemmShape< + Shape::kM / WarpShape::kM, + Shape::kN / WarpShape::kN, + PartitionsK + >; + + // Divisibility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && + !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size." 
+ ); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + static int const kElementsPerAccess = 1; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::ColumnMajor; + using SmemLayoutB = layout::RowMajor; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap< + layout::PitchLinearShape<Shape::kM, Shape::kK>, + kThreads, + kElementsPerAccess + >; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileIterator< + MatrixShape<Shape::kM, Shape::kK>, + ElementA, + SmemLayoutA, + 1, + IteratorThreadMapA + >; + + /// Policy of iterator B + using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap< + layout::PitchLinearShape<Shape::kN, Shape::kK>, + kThreads, + kElementsPerAccess + >; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileIterator< + MatrixShape<Shape::kK, Shape::kN>, + ElementB, + SmemLayoutB, + 0, + IteratorThreadMapB + >; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level op + static const int WarpNumThreadsM = detail::simt_get_warp_threads_m<WarpShape>(); + static const int WarpNumThreadsN = kWarpSize / WarpNumThreadsM; + static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM; + static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN; + static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN), + "WarpShape must be divisible by ThreadTile shape."); + static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1; + static const int numElementsA = 128 / sizeof_bits<ElementA>::value; + static const int numElementsB = 128 / sizeof_bits<ElementB>::value; + static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM); + static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN); + // these should have max of thread tile also + using LaneMmaShape = cutlass::gemm::GemmShape< + LaneM, + LaneN, + 1>; + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape + cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout + LaneMmaShape + >; + + using MmaWarpSimt = cutlass::gemm::warp::MmaSimt< + WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8 + ElementA, /// Data type of A elements + SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout) + ElementB, /// Data type of B elements + SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout) + ElementC, /// Element type of C matrix + LayoutC, /// Layout of C matrix (concept: MatrixLayout) + Policy /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy) + >; /// Used for partial specialization + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy< + MmaWarpSimt, + MatrixShape<0, 0>, + MatrixShape<0, 0>, + WarpCount::kK + >; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization: +/// +/// A: row-major +/// B: column-major +/// Operator: simt class +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Data type of A operand + typename ElementA_, + /// Data type of B operand + typename ElementB_, + /// 
Data type of accumulator + typename ElementC_, + /// Layout of accumulator + typename LayoutC_, + /// Operation performed by GEMM + typename Operator_> +struct DefaultMmaCore, ElementA_, + layout::RowMajor, ElementB_, layout::ColumnMajor, + ElementC_, LayoutC_, arch::OpClassSimt, 2, Operator_ + > { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = GemmShape<1, 1, 1>; + using ElementA = ElementA_; + using LayoutA = layout::RowMajor; + using ElementB = ElementB_; + using LayoutB = layout::ColumnMajor; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + using OperatorClass = arch::OpClassSimt; + static int const PartitionsK = Shape::kK / WarpShape::kK; + + /// Default Operator + using Operator = Operator_; + + /// Number of warps present + using WarpCount = GemmShape< + Shape::kM / WarpShape::kM, + Shape::kN / WarpShape::kN, + PartitionsK + >; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && + !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size." + ); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + static int const kElementsPerAccess = 1; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::ColumnMajor; + using SmemLayoutB = layout::RowMajor; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap< + layout::PitchLinearShape, + kThreads, + kElementsPerAccess + >; + + /// Transpose the ThreadMap of iterator A + using SmemThreadMapA = transform::TransposePitchLinearThreadMapSimt; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileIterator< + MatrixShape, + ElementA, + SmemLayoutA, + 1, + SmemThreadMapA // was IteratorThreadMapA + >; + + /// ThreadMap of iterator B + using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap< + layout::PitchLinearShape, + kThreads, + kElementsPerAccess + >; + + /// Transpose the ThreadMap of iterator A + using SmemThreadMapB = transform::TransposePitchLinearThreadMapSimt; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileIterator< + MatrixShape, + ElementB, + SmemLayoutB, + 0, + SmemThreadMapB // was IteratorThreadMapA + >; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level op + static const int WarpNumThreadsM = detail::simt_get_warp_threads_m(); + static const int WarpNumThreadsN = kWarpSize / WarpNumThreadsM; + static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM; + static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN; + static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN), + "WarpShape must be divisible by ThreadTile shape."); + static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 
2 : 1; + static const int numElementsA = 128 / sizeof_bits::value; + static const int numElementsB = 128 / sizeof_bits::value; + static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM); + static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN); + + static int const kPaddingM = detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits::value); + static int const kPaddingN = detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits::value); + + static_assert(!(kPaddingM % LaneM) && !(kPaddingN % LaneN), + "Padding must be divisible by Lane"); + + // these should have max of thread tile also + using LaneMmaShape = cutlass::gemm::GemmShape< + LaneM, + LaneN, + 1>; + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape, // WarpShape + cutlass::layout::RowMajorInterleaved, // LaneLayout + LaneMmaShape + >; + + using MmaWarpSimt = cutlass::gemm::warp::MmaSimt< + WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8 + ElementA, /// Data type of A elements + SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout) + ElementB, /// Data type of B elements + SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout) + ElementC, /// Element type of C matrix + LayoutC, /// Layout of C matrix (concept: MatrixLayout) + Policy /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy) + >; + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy< + MmaWarpSimt, + MatrixShape, // skew for A matrix to avoid SMEM bank conflicts + MatrixShape<0, kPaddingN>, // skew for B matrix to avoid SMEM bank conflicts + WarpCount::kK + >; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization: +/// +/// A: row-major +/// B: row-major +/// Operator: simt class +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Data type of A operand + typename ElementA_, + /// Data type of B operand + typename ElementB_, + /// Data type of accumulator + typename ElementC_, + /// Layout of accumulator + typename LayoutC_, + /// Operation performed by GEMM + typename Operator_> +struct DefaultMmaCore, ElementA_, + layout::RowMajor, ElementB_, layout::RowMajor, ElementC_, + LayoutC_, arch::OpClassSimt, 2, Operator_ + > { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = GemmShape<1, 1, 1>; + using ElementA = ElementA_; + using LayoutA = layout::RowMajor; + using ElementB = ElementB_; + using LayoutB = layout::RowMajor; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + using OperatorClass = arch::OpClassSimt; + static int const PartitionsK = Shape::kK / WarpShape::kK; + + /// Default Operator + using Operator = Operator_; + + /// Number of warps present + using WarpCount = GemmShape< + Shape::kM / WarpShape::kM, + Shape::kN / WarpShape::kN, + PartitionsK + >; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && + !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size." 
+ ); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + static int const kElementsPerAccess = 1; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::ColumnMajor; + using SmemLayoutB = layout::RowMajor; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap< + layout::PitchLinearShape, + kThreads, + kElementsPerAccess + >; + + /// Transpose the ThreadMap of iterator A + using SmemThreadMapA = transform::TransposePitchLinearThreadMapSimt; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileIterator< + MatrixShape, + ElementA, + SmemLayoutA, + 1, + SmemThreadMapA + >; + + /// Policy of iterator B + using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap< + layout::PitchLinearShape, + kThreads, + kElementsPerAccess + >; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileIterator< + MatrixShape, + ElementB, + SmemLayoutB, + 0, + IteratorThreadMapB + >; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level op + static const int WarpNumThreadsM = detail::simt_get_warp_threads_m(); + static const int WarpNumThreadsN = kWarpSize / WarpNumThreadsM; + static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM; + static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN; + static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN), + "WarpShape must be divisible by ThreadTile shape."); + static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 
2 : 1; + static const int numElementsA = 128 / sizeof_bits::value; + static const int numElementsB = 128 / sizeof_bits::value; + static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM); + static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN); + + static int const kPaddingM = detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits::value); + + static_assert(!(kPaddingM % LaneM), + "Padding must be divisible by Lane"); + + // these should have max of thread tile also + using LaneMmaShape = cutlass::gemm::GemmShape< + LaneM, + LaneN, + 1>; + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape, // WarpShape + cutlass::layout::RowMajorInterleaved, // LaneLayout + LaneMmaShape + >; + + using MmaWarpSimt = cutlass::gemm::warp::MmaSimt< + WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8 + ElementA, /// Data type of A elements + SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout) + ElementB, /// Data type of B elements + SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout) + ElementC, /// Element type of C matrix + LayoutC, /// Layout of C matrix (concept: MatrixLayout) + Policy /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy) + >; + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy< + MmaWarpSimt, + MatrixShape, // skew for A matrix to avoid SMEM bank conflicts + MatrixShape<0, 0>, + WarpCount::kK + >; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization: +/// +/// A: column-major +/// B: column-major +/// Operator: simt class +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Data type of A operand + typename ElementA_, + /// Data type of B operand + typename ElementB_, + /// Data type of accumulator + typename ElementC_, + /// Layout of accumulator + typename LayoutC_, + /// Operation performed by GEMM + typename Operator_> +struct DefaultMmaCore, ElementA_, + layout::ColumnMajor, ElementB_, layout::ColumnMajor, + ElementC_, LayoutC_, arch::OpClassSimt, 2, Operator_ + > { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = GemmShape<1, 1, 1>; + using ElementA = ElementA_; + using LayoutA = layout::ColumnMajor; + using ElementB = ElementB_; + using LayoutB = layout::ColumnMajor; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + using OperatorClass = arch::OpClassSimt; + static int const PartitionsK = Shape::kK / WarpShape::kK; + + /// Default Operator + using Operator = Operator_; + + /// Number of warps present + using WarpCount = GemmShape< + Shape::kM / WarpShape::kM, + Shape::kN / WarpShape::kN, + PartitionsK + >; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && + !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size." 
+ ); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + static int const kElementsPerAccess = 1; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::ColumnMajor; + using SmemLayoutB = layout::RowMajor; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap< + layout::PitchLinearShape, + kThreads, + kElementsPerAccess + >; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileIterator< + MatrixShape, + ElementA, + SmemLayoutA, + 1, + IteratorThreadMapA + >; + + /// ThreadMap of iterator B + using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap< + layout::PitchLinearShape, + kThreads, + kElementsPerAccess + >; + + /// Transpose the ThreadMap of iterator A + using SmemThreadMapB = transform::TransposePitchLinearThreadMapSimt; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileIterator< + MatrixShape, + ElementB, + SmemLayoutB, + 0, + SmemThreadMapB + >; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level op + static const int WarpNumThreadsM = detail::simt_get_warp_threads_m(); + static const int WarpNumThreadsN = kWarpSize / WarpNumThreadsM; + static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM; + static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN; + static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN), + "WarpShape must be divisible by ThreadTile shape."); + static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 
2 : 1; + static const int numElementsA = 128 / sizeof_bits::value; + static const int numElementsB = 128 / sizeof_bits::value; + static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM); + static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN); + + static int const kPaddingN = detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits::value); + + static_assert(!(kPaddingN % LaneN), + "Padding must be divisible by Lane"); + + // these should have max of thread tile also + using LaneMmaShape = cutlass::gemm::GemmShape< + LaneM, + LaneN, + 1>; + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape, // WarpShape + cutlass::layout::RowMajorInterleaved, // LaneLayout + LaneMmaShape + >; + + using MmaWarpSimt = cutlass::gemm::warp::MmaSimt< + WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8 + ElementA, /// Data type of A elements + SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout) + ElementB, /// Data type of B elements + SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout) + ElementC, /// Element type of C matrix + LayoutC, /// Layout of C matrix (concept: MatrixLayout) + Policy /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy) + >; + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy< + MmaWarpSimt, + MatrixShape<0, 0>, + MatrixShape<0, kPaddingN>, // skew for B matrix to avoid SMEM bank conflicts + WarpCount::kK + >; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization: +/// +/// A: column-major +/// B: row-major +/// Operator: simt class +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Data type of A operand + typename ElementA_, + /// Data type of B operand + typename ElementB_, + /// Data type of accumulator + typename ElementC_, + /// Layout of accumulator + typename LayoutC_, + /// Operation performed by GEMM + typename Operator_> +struct DefaultMmaCore, ElementA_, + layout::AffineRank2ColumnMajor, ElementB_, layout::AffineRank2RowMajor, + ElementC_, LayoutC_, arch::OpClassSimt, 2, Operator_ + > { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = GemmShape<1, 1, 1>; + using ElementA = ElementA_; + using LayoutA = layout::AffineRank2ColumnMajor; + using ElementB = ElementB_; + using LayoutB = layout::AffineRank2RowMajor; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + using OperatorClass = arch::OpClassSimt; + + /// Default Operator + using Operator = Operator_; + + using Base = DefaultMmaCore; + + // + // Shared memory layouts + // + + using SmemLayoutA = typename Base::SmemLayoutA; + using SmemLayoutB = typename Base::SmemLayoutB; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = typename Base::IteratorThreadMapA; + + /// Shared memory iterator to A operand + using SmemIteratorA = typename Base::SmemIteratorA; + + /// Policy of iterator B + using IteratorThreadMapB = typename Base::IteratorThreadMapB; + + /// Shared memory iterator to B operand + using SmemIteratorB = typename Base::SmemIteratorB; + + // + // Warp-level matrix multiply operator + // + + /// Policy used to define MmaPipelined + using MmaPolicy = typename 
Base::MmaPolicy; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization: +/// +/// A: row-major +/// B: column-major +/// Operator: simt class +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Data type of A operand + typename ElementA_, + /// Data type of B operand + typename ElementB_, + /// Data type of accumulator + typename ElementC_, + /// Layout of accumulator + typename LayoutC_, + /// Operation performed by GEMM + typename Operator_> +struct DefaultMmaCore, ElementA_, + layout::AffineRank2RowMajor, ElementB_, layout::AffineRank2ColumnMajor, + ElementC_, LayoutC_, arch::OpClassSimt, 2, Operator_ + > { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = GemmShape<1, 1, 1>; + using ElementA = ElementA_; + using LayoutA = layout::AffineRank2RowMajor; + using ElementB = ElementB_; + using LayoutB = layout::AffineRank2ColumnMajor; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + using OperatorClass = arch::OpClassSimt; + + /// Default Operator + using Operator = Operator_; + + using Base = DefaultMmaCore; + + // + // Shared memory layouts + // + + using SmemLayoutA = typename Base::SmemLayoutA; + using SmemLayoutB = typename Base::SmemLayoutB; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = typename Base::IteratorThreadMapA; + + /// Shared memory iterator to A operand + using SmemIteratorA = typename Base::SmemIteratorA; + + /// Policy of iterator B + using IteratorThreadMapB = typename Base::IteratorThreadMapB; + + /// Shared memory iterator to B operand + using SmemIteratorB = typename Base::SmemIteratorB; + + // + // Warp-level matrix multiply operator + // + + /// Policy used to define MmaPipelined + using MmaPolicy = typename Base::MmaPolicy; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization: +/// +/// A: row-major +/// B: row-major +/// Operator: simt class +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Data type of A operand + typename ElementA_, + /// Data type of B operand + typename ElementB_, + /// Data type of accumulator + typename ElementC_, + /// Layout of accumulator + typename LayoutC_, + /// Operation performed by GEMM + typename Operator_> +struct DefaultMmaCore, ElementA_, + layout::AffineRank2RowMajor, ElementB_, layout::AffineRank2RowMajor, ElementC_, + LayoutC_, arch::OpClassSimt, 2, Operator_ + > { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = GemmShape<1, 1, 1>; + using ElementA = ElementA_; + using LayoutA = layout::AffineRank2RowMajor; + using ElementB = ElementB_; + using LayoutB = layout::AffineRank2RowMajor; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + using OperatorClass = arch::OpClassSimt; + + /// Default Operator + using Operator = Operator_; + + using Base = DefaultMmaCore; + + // + // Shared memory layouts + // + + using SmemLayoutA = typename Base::SmemLayoutA; + 
using SmemLayoutB = typename Base::SmemLayoutB; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = typename Base::IteratorThreadMapA; + + /// Shared memory iterator to A operand + using SmemIteratorA = typename Base::SmemIteratorA; + + /// Policy of iterator B + using IteratorThreadMapB = typename Base::IteratorThreadMapB; + + /// Shared memory iterator to B operand + using SmemIteratorB = typename Base::SmemIteratorB; + + // + // Warp-level matrix multiply operator + // + + /// Policy used to define MmaPipelined + using MmaPolicy = typename Base::MmaPolicy; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization: +/// +/// A: column-major +/// B: column-major +/// Operator: simt class +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Data type of A operand + typename ElementA_, + /// Data type of B operand + typename ElementB_, + /// Data type of accumulator + typename ElementC_, + /// Layout of accumulator + typename LayoutC_, + /// Operation performed by GEMM + typename Operator_> +struct DefaultMmaCore, ElementA_, + layout::AffineRank2ColumnMajor, ElementB_, layout::AffineRank2ColumnMajor, + ElementC_, LayoutC_, arch::OpClassSimt, 2, Operator_ + > { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = GemmShape<1, 1, 1>; + using ElementA = ElementA_; + using LayoutA = layout::AffineRank2ColumnMajor; + using ElementB = ElementB_; + using LayoutB = layout::AffineRank2ColumnMajor; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + using OperatorClass = arch::OpClassSimt; + + /// Default Operator + using Operator = Operator_; + + using Base = DefaultMmaCore; + + // + // Shared memory layouts + // + + using SmemLayoutA = typename Base::SmemLayoutA; + using SmemLayoutB = typename Base::SmemLayoutB; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = typename Base::IteratorThreadMapA; + + /// Shared memory iterator to A operand + using SmemIteratorA = typename Base::SmemIteratorA; + + /// Policy of iterator B + using IteratorThreadMapB = typename Base::IteratorThreadMapB; + + /// Shared memory iterator to B operand + using SmemIteratorB = typename Base::SmemIteratorB; + + // + // Warp-level matrix multiply operator + // + + /// Policy used to define MmaPipelined + using MmaPolicy = typename Base::MmaPolicy; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization: +/// +/// A: column-major +/// B: row-major +/// Operator: simt class, for dp4a +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Data type of accumulator + typename ElementC_, + /// Layout of accumulator + typename LayoutC_, + /// Operation performed by GEMM + typename Operator_> +struct DefaultMmaCore, int8_t, + layout::ColumnMajor, int8_t, layout::RowMajor, ElementC_, + LayoutC_, arch::OpClassSimt, 2, Operator_ + > { + + using Shape = Shape_; + using 
WarpShape = WarpShape_; + using InstructionShape = GemmShape<1, 1, 4>; + using ElementA = int8_t; + using LayoutA = layout::ColumnMajor; + using ElementB = int8_t; + using LayoutB = layout::RowMajor; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + using OperatorClass = arch::OpClassSimt; + static int const PartitionsK = Shape::kK / WarpShape::kK; + + /// Default Operator + using Operator = Operator_; + + /// Number of warps present + using WarpCount = GemmShape< + Shape::kM / WarpShape::kM, + Shape::kN / WarpShape::kN, + PartitionsK + >; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && + !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size." + ); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::ColumnMajorInterleaved<4>; + using SmemLayoutB = layout::RowMajorInterleaved<4>; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinear2DThreadTileStripminedThreadMap< + layout::PitchLinearShape, + kThreads, + layout::PitchLinearShape<4, 4> + >; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileIterator2dThreadTile< + MatrixShape, + ElementA, + SmemLayoutA, + 1, + IteratorThreadMapA + >; + + + /// Policy of iterator B + using IteratorThreadMapB = transform::PitchLinear2DThreadTileStripminedThreadMap< + layout::PitchLinearShape, + kThreads, + layout::PitchLinearShape<4, 4> + >; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileIterator2dThreadTile< + MatrixShape, + ElementB, + SmemLayoutB, + 0, + IteratorThreadMapB + >; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level op + static const int WarpNumThreadsM = detail::simt_get_warp_threads_m(); + static const int WarpNumThreadsN = kWarpSize / WarpNumThreadsM; + static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM; + static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN; + static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN), + "WarpShape must be divisible by ThreadTile shape."); + static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 
2 : 1; + static const int numElementsA = 128 / sizeof_bits::value; + static const int numElementsB = 128 / sizeof_bits::value; + static const int LaneM = cutlass::const_min(4, ThreadTileM); + static const int LaneN = cutlass::const_min(4, ThreadTileN); + // these should have max of thread tile also + using LaneMmaShape = cutlass::gemm::GemmShape< + LaneM, + LaneN, + 4>; + + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape, // WarpShape + cutlass::layout::ColumnMajorInterleaved, // LaneLayout + LaneMmaShape + >; + + using MmaWarpSimt = cutlass::gemm::warp::MmaSimt< + WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8 + ElementA, /// Data type of A elements + SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout) + ElementB, /// Data type of B elements + SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout) + ElementC, /// Element type of C matrix + LayoutC, /// Layout of C matrix (concept: MatrixLayout) + Policy, /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy) + PartitionsK /// Number of partitions along K dimension + >; + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy< + MmaWarpSimt, + MatrixShape<0, 0>, + MatrixShape<0, 0>, + WarpCount::kK + >; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// +/// Partial specialization: +// +/// +/// A: Row-major +/// B: Column-major +/// Operator: simt class, for dp4a +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Data type of accumulator + typename ElementC_, + /// Layout of accumulator + typename LayoutC_, + /// Operation performed by GEMM + typename Operator_> +struct DefaultMmaCore, int8_t, + layout::RowMajor, int8_t, layout::ColumnMajor, ElementC_, + LayoutC_, arch::OpClassSimt, 2, Operator_ + > { + + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = GemmShape<1, 1, 4>; + using ElementA = int8_t; + using LayoutA = layout::RowMajor; + using ElementB = int8_t; + using LayoutB = layout::ColumnMajor; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + using OperatorClass = arch::OpClassSimt; + static int const PartitionsK = Shape::kK / WarpShape::kK; + + /// Default Operator + using Operator = Operator_; + + /// Number of warps present + using WarpCount = GemmShape< + Shape::kM / WarpShape::kM, + Shape::kN / WarpShape::kN, + PartitionsK + >; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && + !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size." 
+ ); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::ColumnMajorInterleaved<4>; + using SmemLayoutB = layout::RowMajorInterleaved<4>; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinear2DThreadTileStripminedThreadMap< + layout::PitchLinearShape, + kThreads, + layout::PitchLinearShape<4, 4> + >; + + /// Transpose the ThreadMap of iterator A + using SmemThreadMapA = transform::TransposePitchLinearThreadMap2DThreadTile; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileIterator2dThreadTile< + MatrixShape, + ElementA, + SmemLayoutA, + 1, + SmemThreadMapA + >; + + + /// Policy of iterator B + using IteratorThreadMapB = transform::PitchLinear2DThreadTileStripminedThreadMap< + layout::PitchLinearShape, + kThreads, + layout::PitchLinearShape<4, 4> + >; + + /// Transpose the ThreadMap of iterator A + using SmemThreadMapB = transform::TransposePitchLinearThreadMap2DThreadTile; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileIterator2dThreadTile< + MatrixShape, + ElementB, + SmemLayoutB, + 0, + SmemThreadMapB + >; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level op + static const int WarpNumThreadsM = detail::simt_get_warp_threads_m(); + static const int WarpNumThreadsN = kWarpSize / WarpNumThreadsM; + static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM; + static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN; + static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN), + "WarpShape must be divisible by ThreadTile shape."); + static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 
2 : 1; + static const int numElementsA = 128 / sizeof_bits::value; + static const int numElementsB = 128 / sizeof_bits::value; + static const int LaneM = cutlass::const_min(4, ThreadTileM); + static const int LaneN = cutlass::const_min(4, ThreadTileN); + // these should have max of thread tile also + using LaneMmaShape = cutlass::gemm::GemmShape< + LaneM, + LaneN, + 4>; + + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape, // WarpShape + cutlass::layout::ColumnMajorInterleaved, // LaneLayout + LaneMmaShape + >; + + using MmaWarpSimt = cutlass::gemm::warp::MmaSimt< + WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8 + ElementA, /// Data type of A elements + SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout) + ElementB, /// Data type of B elements + SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout) + ElementC, /// Element type of C matrix + LayoutC, /// Layout of C matrix (concept: MatrixLayout) + Policy, /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy) + PartitionsK /// Number of partitions along K dimension + >; + + static int const kPaddingM = detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits::value); + static int const kPaddingN = detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits::value); + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy< + MmaWarpSimt, + MatrixShape, + MatrixShape<0, kPaddingN>, + WarpCount::kK + >; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// +/// Partial specialization: +// +/// +/// A: Row-major +/// B: Row-major +/// Operator: simt class, for dp4a +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Data type of accumulator + typename ElementC_, + /// Layout of accumulator + typename LayoutC_, + /// Operation performed by GEMM + typename Operator_> +struct DefaultMmaCore, int8_t, + layout::RowMajor, int8_t, layout::RowMajor, ElementC_, + LayoutC_, arch::OpClassSimt, 2, Operator_ + > { + + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = GemmShape<1, 1, 4>; + using ElementA = int8_t; + using LayoutA = layout::RowMajor; + using ElementB = int8_t; + using LayoutB = layout::RowMajor; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + using OperatorClass = arch::OpClassSimt; + static int const PartitionsK = Shape::kK / WarpShape::kK; + + /// Default Operator + using Operator = Operator_; + + /// Number of warps present + using WarpCount = GemmShape< + Shape::kM / WarpShape::kM, + Shape::kN / WarpShape::kN, + PartitionsK + >; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && + !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size." 
+ ); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::ColumnMajorInterleaved<4>; + using SmemLayoutB = layout::RowMajorInterleaved<4>; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinear2DThreadTileStripminedThreadMap< + layout::PitchLinearShape, + kThreads, + layout::PitchLinearShape<4, 4> + >; + + /// Transpose the ThreadMap of iterator A + using SmemThreadMapA = transform::TransposePitchLinearThreadMap2DThreadTile; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileIterator2dThreadTile< + MatrixShape, + ElementA, + SmemLayoutA, + 1, + SmemThreadMapA + >; + + /// Policy of iterator B + using IteratorThreadMapB = transform::PitchLinear2DThreadTileStripminedThreadMap< + layout::PitchLinearShape, + kThreads, + layout::PitchLinearShape<4, 4> + >; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileIterator2dThreadTile< + MatrixShape, + ElementB, + SmemLayoutB, + 0, + IteratorThreadMapB + >; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level op + static const int WarpNumThreadsM = detail::simt_get_warp_threads_m(); + static const int WarpNumThreadsN = kWarpSize / WarpNumThreadsM; + static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM; + static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN; + static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN), + "WarpShape must be divisible by ThreadTile shape."); + static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 
2 : 1; + static const int numElementsA = 128 / sizeof_bits::value; + static const int numElementsB = 128 / sizeof_bits::value; + static const int LaneM = cutlass::const_min(4, ThreadTileM); + static const int LaneN = cutlass::const_min(4, ThreadTileN); + // these should have max of thread tile also + using LaneMmaShape = cutlass::gemm::GemmShape< + LaneM, + LaneN, + 4>; + + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape, // WarpShape + cutlass::layout::ColumnMajorInterleaved, // LaneLayout + LaneMmaShape + >; + + using MmaWarpSimt = cutlass::gemm::warp::MmaSimt< + WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8 + ElementA, /// Data type of A elements + SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout) + ElementB, /// Data type of B elements + SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout) + ElementC, /// Element type of C matrix + LayoutC, /// Layout of C matrix (concept: MatrixLayout) + Policy, /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy) + PartitionsK /// Number of partitions along K dimension + >; + + static int const kPaddingM = detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits::value); + static int const kPaddingN = detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits::value); + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy< + MmaWarpSimt, + MatrixShape, + MatrixShape<0, 0>, + WarpCount::kK + >; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// +/// Partial specialization: +// +/// +/// A: Column-major +/// B: Column-major +/// Operator: simt class, for dp4a +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Data type of accumulator + typename ElementC_, + /// Layout of accumulator + typename LayoutC_, + /// Operation performed by GEMM + typename Operator_> +struct DefaultMmaCore, int8_t, + layout::ColumnMajor, int8_t, layout::ColumnMajor, ElementC_, + LayoutC_, arch::OpClassSimt, 2, Operator_ + > { + + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = GemmShape<1, 1, 4>; + using ElementA = int8_t; + using LayoutA = layout::ColumnMajor; + using ElementB = int8_t; + using LayoutB = layout::ColumnMajor; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + using OperatorClass = arch::OpClassSimt; + static int const PartitionsK = Shape::kK / WarpShape::kK; + + /// Default Operator + using Operator = Operator_; + + /// Number of warps present + using WarpCount = GemmShape< + Shape::kM / WarpShape::kM, + Shape::kN / WarpShape::kN, + PartitionsK + >; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && + !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size." 
+ ); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::ColumnMajorInterleaved<4>; + using SmemLayoutB = layout::RowMajorInterleaved<4>; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinear2DThreadTileStripminedThreadMap< + layout::PitchLinearShape, + kThreads, + layout::PitchLinearShape<4, 4> + >; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileIterator2dThreadTile< + MatrixShape, + ElementA, + SmemLayoutA, + 1, + IteratorThreadMapA + >; + + + /// Policy of iterator B + using IteratorThreadMapB = transform::PitchLinear2DThreadTileStripminedThreadMap< + layout::PitchLinearShape, + kThreads, + layout::PitchLinearShape<4, 4> + >; + + /// Transpose the ThreadMap of iterator A + using SmemThreadMapB = transform::TransposePitchLinearThreadMap2DThreadTile; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileIterator2dThreadTile< + MatrixShape, + ElementB, + SmemLayoutB, + 0, + SmemThreadMapB + >; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level op + static const int WarpNumThreadsM = detail::simt_get_warp_threads_m(); + static const int WarpNumThreadsN = kWarpSize / WarpNumThreadsM; + static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM; + static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN; + static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN), + "WarpShape must be divisible by ThreadTile shape."); + static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 
2 : 1; + static const int numElementsA = 128 / sizeof_bits::value; + static const int numElementsB = 128 / sizeof_bits::value; + static const int LaneM = cutlass::const_min(4, ThreadTileM); + static const int LaneN = cutlass::const_min(4, ThreadTileN); + // these should have max of thread tile also + using LaneMmaShape = cutlass::gemm::GemmShape< + LaneM, + LaneN, + 4>; + + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape, // WarpShape + cutlass::layout::ColumnMajorInterleaved, // LaneLayout + LaneMmaShape + >; + + using MmaWarpSimt = cutlass::gemm::warp::MmaSimt< + WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8 + ElementA, /// Data type of A elements + SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout) + ElementB, /// Data type of B elements + SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout) + ElementC, /// Element type of C matrix + LayoutC, /// Layout of C matrix (concept: MatrixLayout) + Policy, /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy) + PartitionsK /// Number of partitions along K dimension + >; + + static int const kPaddingM = detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits::value); + static int const kPaddingN = detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits::value); + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy< + MmaWarpSimt, + MatrixShape<0, 0>, + MatrixShape<0, kPaddingN>, + WarpCount::kK + >; +}; + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_core_sm70.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_core_sm70.h new file mode 100644 index 0000000000000000000000000000000000000000..fc839653d756c4e799bd880be899b19af11a03f7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_core_sm70.h @@ -0,0 +1,682 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Defines basic properties needed by CTA-level GEMMs assuming expectations about data + layout of the global memory fragments, data types, and internal tile sizes. + + Partial specializations for threadblock::Mma operations targeting TensorOp instructions. +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/array.h" + +#include "cutlass/numeric_types.h" +#include "cutlass/matrix_shape.h" + + +#include "cutlass/layout/tensor_op_multiplicand_sm70.h" +#include "cutlass/transform/pitch_linear_thread_map.h" +#include "cutlass/transform/threadblock/regular_tile_iterator_tensor_op_sm70.h" + +#include "cutlass/gemm/warp/mma_tensor_op_sm70.h" +#include "cutlass/gemm/threadblock/default_mma_core.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace threadblock { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization: +/// +/// A: column-major +/// B: row-major +/// Operator: tensor op class +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Data type of A operand + typename ElementA_, + /// Data type of B operand + typename ElementB_, + /// Data type of accumulator + typename ElementC_, + /// Layout of accumulator + typename LayoutC_, + /// Operation performed by GEMM + typename Operator_> +struct DefaultMmaCore, ElementA_, + layout::ColumnMajor, ElementB_, layout::RowMajor, + ElementC_, LayoutC_, arch::OpClassTensorOp, 2, Operator_ + > { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = GemmShape<8, 8, 4>; + using ElementA = ElementA_; + using LayoutA = layout::ColumnMajor; + using ElementB = ElementB_; + using LayoutB = layout::RowMajor; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + using OperatorClass = arch::OpClassTensorOp; + + /// Default Operator + using Operator = Operator_; + + /// Number of warps present + using WarpCount = GemmShape< + Shape::kM / WarpShape::kM, + Shape::kN / WarpShape::kN, + Shape::kK / WarpShape::kK + >; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && + !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size." 
+ ); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Size of a threadblock-scoped access + static int const kAccessSizeInBits = 128; + + // + // Shared memory layouts + // + + using SmemLayoutA = + layout::ColumnMajorVoltaTensorOpMultiplicandCongruous< + sizeof_bits::value>; + + // Shared memory layout + using SmemLayoutB = + layout::RowMajorVoltaTensorOpMultiplicandBCongruous< + sizeof_bits::value>; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, + kThreads, + layout::PitchLinearShape<8, 4>, + kAccessSizeInBits / sizeof_bits::value + >; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileIterator< + MatrixShape, + ElementA, + SmemLayoutA, + 1, + IteratorThreadMapA + >; + + /// Policy of iterator B + using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, + kThreads, + layout::PitchLinearShape<8, 4>, + kAccessSizeInBits / sizeof_bits::value + >; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileIterator< + MatrixShape, + ElementB, + SmemLayoutB, + 0, + IteratorThreadMapB + >; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level tensor op + using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< + cutlass::arch::Mma< + cutlass::gemm::GemmShape<16, 16, 4>, + 32, + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + cutlass::layout::RowMajor, + cutlass::arch::OpMultiplyAdd + >, + cutlass::MatrixShape<1, 1> + >; + + using MmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< + WarpShape, + ElementA, + SmemLayoutA, + ElementB, + SmemLayoutB, + ElementC, + LayoutC, + Policy + >; + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy< + MmaTensorOp, + MatrixShape<0, 0>, + MatrixShape<0, 0>, + WarpCount::kK + >; +}; + +/// Partial specialization: +/// +/// A: row-major +/// B: column-major +/// Operator: tensor op class +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Data type of A operand + typename ElementA_, + /// Data type of B operand + typename ElementB_, + /// Data type of accumulator + typename ElementC_, + /// Layout of accumulator + typename LayoutC_, + /// Operation performed by GEMM + typename Operator_> +struct DefaultMmaCore, ElementA_, + layout::RowMajor, ElementB_, layout::ColumnMajor, + ElementC_, LayoutC_, arch::OpClassTensorOp, 2, Operator_ + > { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = GemmShape<8, 8, 4>; + using ElementA = ElementA_; + using LayoutA = layout::RowMajor; + using ElementB = ElementB_; + using LayoutB = layout::ColumnMajor; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + using OperatorClass = arch::OpClassTensorOp; + + /// Default Operator + using Operator = Operator_; + + /// Number of warps present + using WarpCount = GemmShape< + Shape::kM / WarpShape::kM, + Shape::kN / WarpShape::kN, + Shape::kK / WarpShape::kK + >; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && + 
!(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size." + ); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Size of a threadblock-scoped access + static int const kAccessSizeInBits = 128; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::RowMajorVoltaTensorOpMultiplicandCrosswise< + sizeof_bits::value, Shape::kK>; + + // Shared memory layout + using SmemLayoutB = layout::ColumnMajorVoltaTensorOpMultiplicandCrosswise< + sizeof_bits::value, Shape::kK>; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, + kThreads, + layout::PitchLinearShape<4, 8>, + kAccessSizeInBits / sizeof_bits::value + >; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileIterator< + MatrixShape, + ElementA, + SmemLayoutA, + 0, + IteratorThreadMapA + >; + + /// ThreadMap of iterator B + using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, + kThreads, + layout::PitchLinearShape<4, 8>, + kAccessSizeInBits / sizeof_bits::value + >; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileIterator< + MatrixShape, + ElementB, + SmemLayoutB, + 1, + IteratorThreadMapB + >; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level tensor op + using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< + cutlass::arch::Mma< + cutlass::gemm::GemmShape<16, 16, 4>, + 32, + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + cutlass::layout::RowMajor, + cutlass::arch::OpMultiplyAdd + >, + cutlass::MatrixShape<1, 1> + >; + + using MmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< + WarpShape, + ElementA, + SmemLayoutA, + ElementB, + SmemLayoutB, + ElementC, + LayoutC, + Policy + >; + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy< + MmaTensorOp, + MatrixShape<0, 0>, + MatrixShape<0, 0>, + WarpCount::kK + >; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization: +/// +/// A: row-major +/// B: row-major +/// Operator: tensor op class +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Data type of A operand + typename ElementA_, + /// Data type of B operand + typename ElementB_, + /// Data type of accumulator + typename ElementC_, + /// Layout of accumulator + typename LayoutC_, + /// Operation performed by GEMM + typename Operator_> +struct DefaultMmaCore, ElementA_, + layout::RowMajor, ElementB_, layout::RowMajor, ElementC_, + LayoutC_, arch::OpClassTensorOp, 2, Operator_ + > { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = GemmShape<8, 8, 4>; + using ElementA = ElementA_; + using LayoutA = layout::RowMajor; + using ElementB = ElementB_; + using LayoutB = layout::RowMajor; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + using OperatorClass = arch::OpClassTensorOp; + + /// Default Operator + using Operator = Operator_; + + /// Number of warps present 
+ using WarpCount = GemmShape< + Shape::kM / WarpShape::kM, + Shape::kN / WarpShape::kN, + Shape::kK / WarpShape::kK + >; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && + !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size." + ); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Size of a threadblock-scoped access + static int const kAccessSizeInBits = 128; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::RowMajorVoltaTensorOpMultiplicandCrosswise< + sizeof_bits::value, Shape::kK>; + + // Shared memory layout + using SmemLayoutB = layout::RowMajorVoltaTensorOpMultiplicandBCongruous< + sizeof_bits::value>; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, + kThreads, + layout::PitchLinearShape<4, 8>, + kAccessSizeInBits / sizeof_bits::value + >; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileIterator< + MatrixShape, + ElementA, + SmemLayoutA, + 0, + IteratorThreadMapA + >; + + /// Policy of iterator B + using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, + kThreads, + layout::PitchLinearShape<8, 4>, + kAccessSizeInBits / sizeof_bits::value + >; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileIterator< + MatrixShape, + ElementB, + SmemLayoutB, + 0, + IteratorThreadMapB + >; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level tensor op + using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< + cutlass::arch::Mma< + cutlass::gemm::GemmShape<16, 16, 4>, + 32, + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + cutlass::layout::RowMajor, + cutlass::arch::OpMultiplyAdd + >, + cutlass::MatrixShape<1, 1> + >; + + using MmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< + WarpShape, + ElementA, + SmemLayoutA, + ElementB, + SmemLayoutB, + ElementC, + LayoutC, + Policy + >; + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy< + MmaTensorOp, + MatrixShape<0, 0>, + MatrixShape<0, 0>, + WarpCount::kK + >; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization: +/// +/// A: column-major +/// B: column-major +/// Operator: tensor op class +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Data type of A operand + typename ElementA_, + /// Data type of B operand + typename ElementB_, + /// Data type of accumulator + typename ElementC_, + /// Layout of accumulator + typename LayoutC_, + /// Operation performed by GEMM + typename Operator_> +struct DefaultMmaCore, ElementA_, + layout::ColumnMajor, ElementB_, layout::ColumnMajor, + ElementC_, LayoutC_, arch::OpClassTensorOp, 2, Operator_ + > { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = GemmShape<8, 8, 4>; + using ElementA = ElementA_; + using LayoutA = layout::ColumnMajor; + using ElementB = ElementB_; + using LayoutB = 
layout::ColumnMajor; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + using OperatorClass = arch::OpClassTensorOp; + + /// Default Operator + using Operator = Operator_; + + /// Number of warps present + using WarpCount = GemmShape< + Shape::kM / WarpShape::kM, + Shape::kN / WarpShape::kN, + Shape::kK / WarpShape::kK + >; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && + !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size." + ); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Size of a threadblock-scoped access + static int const kAccessSizeInBits = 128; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::ColumnMajorVoltaTensorOpMultiplicandCongruous< + sizeof_bits::value>; + + // Shared memory layout + using SmemLayoutB = layout::ColumnMajorVoltaTensorOpMultiplicandCrosswise< + sizeof_bits::value, Shape::kK>; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, + kThreads, + layout::PitchLinearShape<8, 4>, + kAccessSizeInBits / sizeof_bits::value + >; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileIterator< + MatrixShape, + ElementA, + SmemLayoutA, + 1, + IteratorThreadMapA + >; + + /// ThreadMap of iterator B + using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, + kThreads, + layout::PitchLinearShape<4, 8>, + kAccessSizeInBits / sizeof_bits::value + >; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileIterator< + MatrixShape, + ElementB, + SmemLayoutB, + 1, + IteratorThreadMapB + >; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level tensor op + using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< + cutlass::arch::Mma< + cutlass::gemm::GemmShape<16, 16, 4>, + 32, + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + cutlass::layout::RowMajor, + cutlass::arch::OpMultiplyAdd + >, + cutlass::MatrixShape<1, 1> + >; + + using MmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< + WarpShape, + ElementA, + SmemLayoutA, + ElementB, + SmemLayoutB, + ElementC, + LayoutC, + Policy + >; + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy< + MmaTensorOp, + MatrixShape<0, 0>, + MatrixShape<0, 0>, + WarpCount::kK + >; +}; + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_core_sm75.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_core_sm75.h new file mode 100644 index 0000000000000000000000000000000000000000..697c45f6273ba0036536a9043afc1122735cc10c --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_core_sm75.h @@ -0,0 +1,1279 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ **************************************************************************************************/
+/*! \file
+    \brief Defines basic properties needed by CTA-level GEMMs assuming expectations about data
+      layout of the global memory fragments, data types, and internal tile sizes.
+
+      Partial specializations for threadblock::Mma operations targeting TensorOp instructions.
+*/
+
+#pragma once
+
+#include "cutlass/cutlass.h"
+#include "cutlass/array.h"
+#include "cutlass/platform/platform.h"
+
+#include "cutlass/numeric_types.h"
+#include "cutlass/matrix_shape.h"
+
+#include "cutlass/layout/tensor_op_multiplicand_sm75.h"
+#include "cutlass/transform/pitch_linear_thread_map.h"
+#include "cutlass/transform/threadblock/regular_tile_iterator_tensor_op.h"
+
+#include "cutlass/gemm/warp/default_mma_tensor_op.h"
+#include "cutlass/gemm/threadblock/default_mma_core.h"
+
+////////////////////////////////////////////////////////////////////////////////
+
+namespace cutlass {
+namespace gemm {
+namespace threadblock {
+
+////////////////////////////////////////////////////////////////////////////////
+
+/// Partial specialization:
+///
+///   A: column-major
+///   B: row-major
+///   Operator: tensor op class
+///
+/// This uses the default warp-level operator given tile sizes
+template <
+    /// Shape of threadblock-scoped matrix multiply operator (concept:
+    /// GemmShape)
+    typename Shape_,
+    /// Shape of warp-level matrix multiply operator (concept: GemmShape)
+    typename WarpShape_,
+    /// Shape of one matrix production operation (concept: GemmShape)
+    typename InstructionShape_,
+    /// Data type of A operand
+    typename ElementA_,
+    /// Data type of B operand
+    typename ElementB_,
+    /// Data type of accumulator
+    typename ElementC_,
+    /// Layout of accumulator
+    typename LayoutC_,
+    /// Operation performed by GEMM
+    typename Operator_>
+struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
+                      layout::ColumnMajor, ElementB_, layout::RowMajor, ElementC_,
+                      LayoutC_, arch::OpClassTensorOp, 2, Operator_> {
+  using Shape = Shape_;
+  using WarpShape = WarpShape_;
+  using InstructionShape = InstructionShape_;
+  using ElementA = ElementA_;
+  using LayoutA = layout::ColumnMajor;
+  using ElementB = ElementB_;
+  using LayoutB = layout::RowMajor;
+  using ElementC = ElementC_;
+  using LayoutC = LayoutC_;
+  using OperatorClass = arch::OpClassTensorOp;
+
+  /// Number of warps present
+  using WarpCount = GemmShape<
+    Shape::kM / WarpShape::kM,
+    Shape::kN / WarpShape::kN,
+    Shape::kK / WarpShape::kK
+  >;
+
+  // Divisility requirements
+  static_assert(
+    !(Shape::kM % WarpShape::kM) &&
+    !(Shape::kN % WarpShape::kN),
+    "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
+ ); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Size of a threadblock-scoped access + static int const kAccessSizeInBits = 128; + + /// Default Operator + using Operator = Operator_; + + // + // Shared memory layouts + // + + using SmemLayoutA = + layout::ColumnMajorTensorOpMultiplicandCongruous< + sizeof_bits::value, int(128 / sizeof(ElementA))>; + + // Shared memory layout + using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous< + sizeof_bits::value, int(128 / sizeof(ElementB))>; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, + kThreads, + layout::PitchLinearShape<8, 4>, + kAccessSizeInBits / sizeof_bits::value + >; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileIterator< + MatrixShape, + ElementA, + SmemLayoutA, + 1, + IteratorThreadMapA + >; + + /// ThreadMap of iterator B + using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, + kThreads, + layout::PitchLinearShape<8, 4>, + kAccessSizeInBits / sizeof_bits::value + >; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileIterator< + MatrixShape, + ElementB, + SmemLayoutB, + 0, + IteratorThreadMapB + >; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level tensor op + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, + ElementC, LayoutC, Operator, WarpCount::kK>::Type; + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy< + MmaTensorOp, + MatrixShape<0, 0>, + MatrixShape<0, 0>, + WarpCount::kK + >; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization: +/// +/// A: row-major +/// B: column-major +/// Operator: tensor op class +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Data type of A operand + typename ElementA_, + /// Data type of B operand + typename ElementB_, + /// Data type of accumulator + typename ElementC_, + /// Layout of accumulator + typename LayoutC_, + /// Operation performed by MMA + typename Operator_> +struct DefaultMmaCore { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using ElementA = ElementA_; + using LayoutA = layout::RowMajor; + using ElementB = ElementB_; + using LayoutB = layout::ColumnMajor; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + using OperatorClass = arch::OpClassTensorOp; + + /// Number of warps present + using WarpCount = GemmShape< + Shape::kM / WarpShape::kM, + Shape::kN / WarpShape::kN, + Shape::kK / WarpShape::kK + >; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && + !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size." 
+ ); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Size of a threadblock-scoped access + static int const kAccessSizeInBits = 128; + + /// Default Operator + using Operator = Operator_; + + // Warp thread arrangement + static int const kWarpThreadArrangementContiguousA = + Shape::kK / (kAccessSizeInBits / sizeof_bits::value); + + static int const kWarpThreadArrangementStridedA = + kWarpSize / kWarpThreadArrangementContiguousA; + + static int const kWarpThreadArrangementContiguousB = + Shape::kK / (kAccessSizeInBits / sizeof_bits::value); + + static int const kWarpThreadArrangementStridedB = + kWarpSize / kWarpThreadArrangementContiguousB; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise< + sizeof_bits::value, Shape::kK>; + + // Shared memory layout + using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise< + sizeof_bits::value, Shape::kK>; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileIterator< + MatrixShape, + ElementA, + SmemLayoutA, + 0, + IteratorThreadMapA + >; + + /// ThreadMap of iterator B + using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileIterator< + MatrixShape, + ElementB, + SmemLayoutB, + 1, + IteratorThreadMapB + >; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level tensor op + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, + ElementC, LayoutC, Operator, WarpCount::kK>::Type; + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy< + MmaTensorOp, + MatrixShape<0, 0>, + MatrixShape<0, 0>, + WarpCount::kK + >; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization: +/// +/// A: row-major +/// B: row-major +/// Operator: tensor op class +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Data type of A operand + typename ElementA_, + /// Data type of B operand + typename ElementB_, + /// Data type of accumulator + typename ElementC_, + /// Layout of accumulator + typename LayoutC_, + /// Operation performed by MMA + typename Operator_> +struct DefaultMmaCore { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using ElementA = ElementA_; + using LayoutA = layout::RowMajor; + using ElementB = ElementB_; + using LayoutB = layout::RowMajor; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + using OperatorClass = 
arch::OpClassTensorOp; + + /// Number of warps present + using WarpCount = GemmShape< + Shape::kM / WarpShape::kM, + Shape::kN / WarpShape::kN, + Shape::kK / WarpShape::kK + >; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && + !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size." + ); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Size of a threadblock-scoped access + static int const kAccessSizeInBits = 128; + + /// Default Operator + using Operator = Operator_; + + // Warp thread arrangement + static int const kWarpThreadArrangementContiguousA = + Shape::kK / (kAccessSizeInBits / sizeof_bits::value); + + static int const kWarpThreadArrangementStridedA = + kWarpSize / kWarpThreadArrangementContiguousA; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise< + sizeof_bits::value, Shape::kK>; + + // Shared memory layout + using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous< + sizeof_bits::value, int(128 / sizeof(ElementB))>; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileIterator< + MatrixShape, + ElementA, + SmemLayoutA, + 0, + IteratorThreadMapA + >; + + /// ThreadMap of iterator B + using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, + kThreads, + layout::PitchLinearShape<8, 4>, + kAccessSizeInBits / sizeof_bits::value + >; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileIterator< + MatrixShape, + ElementB, + SmemLayoutB, + 0, + IteratorThreadMapB + >; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level tensor op + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, + ElementC, LayoutC, Operator, WarpCount::kK>::Type; + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy< + MmaTensorOp, + MatrixShape<0, 0>, + MatrixShape<0, 0>, + WarpCount::kK + >; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization: +/// +/// A: column-major +/// B: column-major +/// Operator: tensor op class +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Data type of A operand + typename ElementA_, + /// Data type of B operand + typename ElementB_, + /// Data type of accumulator + typename ElementC_, + /// Layout of accumulator + typename LayoutC_, + /// Operation performed by MMA + typename Operator_> +struct DefaultMmaCore { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using ElementA = ElementA_; + using LayoutA = 
layout::ColumnMajor; + using ElementB = ElementB_; + using LayoutB = layout::ColumnMajor; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + using OperatorClass = arch::OpClassTensorOp; + + /// Number of warps present + using WarpCount = GemmShape; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Size of a threadblock-scoped access + static int const kAccessSizeInBits = 128; + + /// Default Operator + using Operator = Operator_; + + // Warp thread arrangement + static int const kWarpThreadArrangementContiguousB = + Shape::kK / (kAccessSizeInBits / sizeof_bits::value); + + static int const kWarpThreadArrangementStridedB = + kWarpSize / kWarpThreadArrangementContiguousB; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous< + sizeof_bits::value, int(128 / sizeof(ElementA))>; + + // Shared memory layout + using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise< + sizeof_bits::value, Shape::kK>; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape<8, 4>, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileIterator< + MatrixShape, ElementA, SmemLayoutA, 1, + IteratorThreadMapA>; + + /// ThreadMap of iterator B + using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileIterator< + MatrixShape, ElementB, SmemLayoutB, 1, + IteratorThreadMapB>; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level tensor op + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, + ElementC, LayoutC, Operator, WarpCount::kK>::Type; + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy, + MatrixShape<0, 0>, WarpCount::kK>; +}; + +//////////////////////////////////////////////////////////////////////////////// +/// Below is for arch::OpMultiplyAddFastF16 + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization: +/// +/// A: column-major +/// B: row-major +/// Operator: tensor op class +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Layout of accumulator + typename LayoutC_> +struct DefaultMmaCore { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using ElementA = float; + using LayoutA = layout::ColumnMajor; + using ElementB = float; + using LayoutB = 
layout::RowMajor; + using ElementC = float; + using LayoutC = LayoutC_; + using OperatorClass = arch::OpClassTensorOp; + + /// Number of warps present + using WarpCount = GemmShape< + Shape::kM / WarpShape::kM, + Shape::kN / WarpShape::kN, + Shape::kK / WarpShape::kK + >; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && + !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size." + ); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Size of a threadblock-scoped access + static int const kAccessSizeInBits = 256; + + /// Default Operator + using Operator = arch::OpMultiplyAdd; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous< + sizeof_bits::value, int(128 / sizeof(half_t))>; + + // Shared memory layout + using SmemLayoutB = + layout::RowMajorTensorOpMultiplicandCongruous::value, + int(128 / sizeof(half_t))>; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, + kThreads, + layout::PitchLinearShape<8, 4>, + kAccessSizeInBits / sizeof_bits::value + >; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileIterator< + MatrixShape, + half_t, + SmemLayoutA, + 1, + IteratorThreadMapA + >; + + /// ThreadMap of iterator B + using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, + kThreads, + layout::PitchLinearShape<8, 4>, + kAccessSizeInBits / sizeof_bits::value + >; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileIterator< + MatrixShape, + half_t, + SmemLayoutB, + 0, + IteratorThreadMapB + >; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level tensor op + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + WarpShape, InstructionShape, half_t, SmemLayoutA, half_t, SmemLayoutB, + ElementC, LayoutC, Operator, WarpCount::kK>::Type; + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy< + MmaTensorOp, + MatrixShape<0, 0>, + MatrixShape<0, 0>, + WarpCount::kK + >; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization: +/// +/// A: row-major +/// B: column-major +/// Operator: tensor op class +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Layout of accumulator + typename LayoutC_> +struct DefaultMmaCore { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using ElementA = float; + using LayoutA = layout::RowMajor; + using ElementB = float; + using LayoutB = layout::ColumnMajor; + using ElementC = float; + using LayoutC = LayoutC_; + using OperatorClass = arch::OpClassTensorOp; + + /// Number of warps present + using WarpCount = GemmShape< + Shape::kM / WarpShape::kM, + Shape::kN / WarpShape::kN, + Shape::kK / WarpShape::kK + >; + + // 
Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && + !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size." + ); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Size of a threadblock-scoped access + static int const kAccessSizeInBits = 256; + + /// Default Operator + using Operator = arch::OpMultiplyAdd; + + // Warp thread arrangement + static int const kWarpThreadArrangementContiguousA = + Shape::kK / (kAccessSizeInBits / sizeof_bits::value); + + static int const kWarpThreadArrangementStridedA = + kWarpSize / kWarpThreadArrangementContiguousA; + + static int const kWarpThreadArrangementContiguousB = + Shape::kK / (kAccessSizeInBits / sizeof_bits::value); + + static int const kWarpThreadArrangementStridedB = + kWarpSize / kWarpThreadArrangementContiguousB; + + // + // Shared memory layouts + // + + using SmemLayoutA = + layout::RowMajorTensorOpMultiplicandCrosswise::value, + Shape::kK>; + + // Shared memory layout + using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise< + sizeof_bits::value, Shape::kK>; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileIterator< + MatrixShape, + half_t, + SmemLayoutA, + 0, + IteratorThreadMapA + >; + + /// ThreadMap of iterator B + using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileIterator< + MatrixShape, + half_t, + SmemLayoutB, + 1, + IteratorThreadMapB + >; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level tensor op + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + WarpShape, InstructionShape, half_t, SmemLayoutA, half_t, SmemLayoutB, + ElementC, LayoutC, Operator, WarpCount::kK>::Type; + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy< + MmaTensorOp, + MatrixShape<0, 0>, + MatrixShape<0, 0>, + WarpCount::kK + >; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization: +/// +/// A: row-major +/// B: row-major +/// Operator: tensor op class +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Layout of accumulator + typename LayoutC_> +struct DefaultMmaCore { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using ElementA = float; + using LayoutA = layout::RowMajor; + using ElementB = float; + using LayoutB = layout::RowMajor; + using ElementC = float; + using LayoutC = LayoutC_; + using OperatorClass = arch::OpClassTensorOp; + + /// Number of warps present + using 
WarpCount = GemmShape< + Shape::kM / WarpShape::kM, + Shape::kN / WarpShape::kN, + Shape::kK / WarpShape::kK + >; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && + !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size." + ); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Size of a threadblock-scoped access + static int const kAccessSizeInBits = 256; + + /// Default Operator + using Operator = arch::OpMultiplyAdd; + + // Warp thread arrangement + static int const kWarpThreadArrangementContiguousA = + Shape::kK / (kAccessSizeInBits / sizeof_bits::value); + + static int const kWarpThreadArrangementStridedA = + kWarpSize / kWarpThreadArrangementContiguousA; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise< + sizeof_bits::value, Shape::kK>; + + // Shared memory layout + using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous< + sizeof_bits::value, int(128 / sizeof(half_t))>; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileIterator< + MatrixShape, + half_t, + SmemLayoutA, + 0, + IteratorThreadMapA + >; + + /// ThreadMap of iterator B + using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, + kThreads, + layout::PitchLinearShape<8, 4>, + kAccessSizeInBits / sizeof_bits::value + >; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileIterator< + MatrixShape, + half_t, + SmemLayoutB, + 0, + IteratorThreadMapB + >; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level tensor op + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + WarpShape, InstructionShape, half_t, SmemLayoutA, half_t, SmemLayoutB, + ElementC, LayoutC, Operator, WarpCount::kK>::Type; + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy< + MmaTensorOp, + MatrixShape<0, 0>, + MatrixShape<0, 0>, + WarpCount::kK + >; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization: +/// +/// A: column-major +/// B: column-major +/// Operator: tensor op class +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Layout of accumulator + typename LayoutC_> +struct DefaultMmaCore { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using ElementA = float; + using LayoutA = layout::ColumnMajor; + using ElementB = float; + using LayoutB = layout::ColumnMajor; + using ElementC = float; + using LayoutC = LayoutC_; + using OperatorClass = arch::OpClassTensorOp; + + /// Number of warps present + using WarpCount = GemmShape; + + // Divisility requirements + 
static_assert( + !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Size of a threadblock-scoped access + static int const kAccessSizeInBits = 256; + + /// Default Operator + using Operator = arch::OpMultiplyAdd; + + // Warp thread arrangement + static int const kWarpThreadArrangementContiguousB = + Shape::kK / (kAccessSizeInBits / sizeof_bits::value); + + static int const kWarpThreadArrangementStridedB = + kWarpSize / kWarpThreadArrangementContiguousB; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous< + sizeof_bits::value, int(128 / sizeof(half_t))>; + + // Shared memory layout + using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise< + sizeof_bits::value, Shape::kK>; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape<8, 4>, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileIterator< + MatrixShape, half_t, SmemLayoutA, 1, + IteratorThreadMapA>; + + /// ThreadMap of iterator B + using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileIterator< + MatrixShape, half_t, SmemLayoutB, 1, + IteratorThreadMapB>; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level tensor op + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + WarpShape, InstructionShape, half_t, SmemLayoutA, half_t, SmemLayoutB, + ElementC, LayoutC, Operator, WarpCount::kK>::Type; + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy, MatrixShape<0, 0>, + WarpCount::kK>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization: +/// +/// A: column-major-interleave +/// B: row-major-interleave +/// Operator: tensor op class +/// +/// This uses the default warp-level operator given tile sizes +/// +/// Column/RowMajorInterleved(m, n) is mapped to Column/RowMajor(m +/// x InterleavedK, n / InterleavedK) so that Column/RowMajor global iterators +/// can be reused. The shared store iterator is the same as the crosswise shared +/// store iterator. So, the only thing we need to do is to swap the coordinates +/// (contiguous <=> strided) used by the global iterator and the shared store +/// iterator. 
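
The coordinate remapping described in the comment above can be illustrated with a small, self-contained sketch. This is an editorial illustration, not part of the vendored CUTLASS header; the helper names (`interleaved_as_column_major`, `swap_contiguous_strided`) and the struct types are hypothetical, and it only models the extent remapping and the contiguous/strided swap in plain C++.

```cpp
// Editorial sketch (not part of the vendored CUTLASS sources): illustrates the
// extent remapping and the contiguous/strided coordinate swap described in the
// comment above. All names here are hypothetical.
#include <cstdio>

struct Extent { int rows; int cols; };
struct PitchLinearCoord { int contiguous; int strided; };

// ColumnMajorInterleaved<K>(m, n) viewed as plain ColumnMajor(m * K, n / K),
// so the existing column-major global iterators can be reused unchanged.
Extent interleaved_as_column_major(int m, int n, int interleaved_k) {
  return {m * interleaved_k, n / interleaved_k};
}

// The shared store iterator reuses the crosswise iterator, so the pitch-linear
// (contiguous, strided) coordinate pair used by the global iterator is swapped.
PitchLinearCoord swap_contiguous_strided(PitchLinearCoord c) {
  return {c.strided, c.contiguous};
}

int main() {
  Extent e = interleaved_as_column_major(128, 64, /*InterleavedK=*/32);
  PitchLinearCoord c = swap_contiguous_strided({/*contiguous=*/3, /*strided=*/7});
  std::printf("extent=(%d,%d)  swapped=(%d,%d)\n", e.rows, e.cols,
              c.contiguous, c.strided);
  return 0;
}
```

With InterleavedK = 32, an extent of (128, 64) is viewed as (4096, 2), which is why the plain column-major global iterators can be reused, while only the thread-map coordinates are transposed for the shared store iterator.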
+template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Data type of A operand + typename ElementA_, + /// Data type of B operand + typename ElementB_, + /// Data type of accumulator + typename ElementC_, + /// Layout of accumulator + typename LayoutC_, + /// Operation performed by MMA + typename Operator_, + /// Store the accumulators in row major or column major. Row major is used + /// when output layout is interleaved. + bool AccumulatorsInRowMajor, + /// Number of interleaved k + int InterleavedK> +struct DefaultMmaCore, ElementB_, + layout::RowMajorInterleaved, ElementC_, + LayoutC_, arch::OpClassTensorOp, 2, Operator_, + AccumulatorsInRowMajor> { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using ElementA = ElementA_; + using LayoutA = layout::ColumnMajorInterleaved; + using ElementB = ElementB_; + using LayoutB = layout::RowMajorInterleaved; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + using OperatorClass = arch::OpClassTensorOp; + static int const kInterleavedK = InterleavedK; + + /// Number of warps present + using WarpCount = GemmShape; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Size of a threadblock-scoped access + static int const kAccessSizeInBits = 128; + + /// Default Operator + using Operator = Operator_; + + // Warp thread arrangement + static int const kElementsPerAccess = + kAccessSizeInBits / sizeof_bits::value; + + static int const kWarpThreadArrangementContiguous = + kInterleavedK / kElementsPerAccess; + + static int const kWarpThreadArrangementStrided = + kWarpSize / kWarpThreadArrangementContiguous; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise< + sizeof_bits::value, kInterleavedK>; + + // Shared memory layout + using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise< + sizeof_bits::value, kInterleavedK>; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, + kThreads, layout::PitchLinearShape<32, 1>, kElementsPerAccess>; + + /// Transpose the ThreadMap of iterator A + using SmemThreadMapA = transform::TransposePitchLinearThreadMap< + IteratorThreadMapA, + layout::PitchLinearShape>; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileIterator< + MatrixShape, ElementA, SmemLayoutA, 0, + SmemThreadMapA>; + + /// ThreadMap of iterator B + using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, + kThreads, layout::PitchLinearShape<32, 1>, kElementsPerAccess>; + + /// Transpose the ThreadMap of iterator A + using SmemThreadMapB = transform::TransposePitchLinearThreadMap< + IteratorThreadMapB, + layout::PitchLinearShape>; + + /// Shared memory iterator to B operand + using SmemIteratorB = 
transform::threadblock::RegularTileIterator<
+      MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1,
+      SmemThreadMapB>;
+
+  //
+  // Warp-level matrix multiply operator
+  //
+
+  // Define the warp-level tensor op
+  using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
+      WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB,
+      ElementC, LayoutC, Operator, WarpCount::kK, AccumulatorsInRowMajor>::Type;
+
+  /// Policy used to define MmaPipelined
+  using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>,
+                              MatrixShape<0, 0>, WarpCount::kK>;
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+}  // namespace threadblock
+}  // namespace gemm
+}  // namespace cutlass
diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_core_sm80.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_core_sm80.h
new file mode 100644
index 0000000000000000000000000000000000000000..39a6454d007ba5f1d31cf556be9d0932a2d51eaa
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_core_sm80.h
@@ -0,0 +1,2916 @@
+/***************************************************************************************************
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ **************************************************************************************************/
+
+/*! \file
+    \brief Defines basic properties needed by CTA-level GEMMs assuming
+      expectations about data layout of the global memory fragments, data types,
+      and internal tile sizes.
+
+      Partial specializations for threadblock::Mma operations targeting TensorOp
+      instructions.
+
+    The SM80 multistage kernel expects the stage count to be greater than or
+    equal to 3 in order to use asynchronous copy.
+*/
+
+#pragma once
+
+#include "cutlass/array.h"
+#include "cutlass/cutlass.h"
+
+#include "cutlass/layout/tensor_op_multiplicand_sm75.h"
+#include "cutlass/layout/tensor_op_multiplicand_sm80.h"
+
+#include "cutlass/gemm/warp/mma_simt_policy.h"
+#include "cutlass/gemm/warp/mma_simt.h"
+#include "cutlass/gemm/warp/default_mma_tensor_op.h"
+#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h"
+
+#include "cutlass/gemm/threadblock/default_mma_core.h"
+#include "cutlass/gemm/threadblock/default_multistage_mma_complex_core.h"
+#include "cutlass/gemm/threadblock/default_multistage_mma_complex_core_sm80.h"
+
+#include "cutlass/matrix_shape.h"
+#include "cutlass/numeric_types.h"
+#include "cutlass/transform/pitch_linear_thread_map.h"
+#include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op.h"
+#include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op_sm80.h"
+#include "cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear.h"
+#include "cutlass/gemm/threadblock/mma_multistage.h"
+
+////////////////////////////////////////////////////////////////////////////////
+
+namespace cutlass {
+namespace gemm {
+namespace threadblock {
+
+////////////////////////////////////////////////////////////////////////////////
+
+/// Partial specialization for double-precision
+///
+/// A: column-major
+/// B: column-major
+/// Operator: tensor op class
+///
+/// This uses the default warp-level operator given tile sizes
+template <
+    /// Shape of threadblock-scoped matrix multiply operator (concept:
+    /// GemmShape)
+    typename Shape_,
+    /// Shape of warp-level matrix multiply operator (concept: GemmShape)
+    typename WarpShape_,
+    /// Shape of one matrix production operation (concept: GemmShape)
+    typename InstructionShape_,
+    /// Layout of accumulator
+    typename LayoutC_,
+    /// Number of stages
+    int Stages,
+    /// Operation performed by MMA
+    typename Operator_,
+    /// Cache operation of operand A
+    cutlass::arch::CacheOperation::Kind CacheOpA,
+    /// Cache operation of operand B
+    cutlass::arch::CacheOperation::Kind CacheOpB>
+struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, double,
+                      layout::ColumnMajor, double, layout::ColumnMajor, double,
+                      LayoutC_, arch::OpClassTensorOp, Stages, Operator_, false,
+                      CacheOpA, CacheOpB> {
+  using Shape = Shape_;
+  using WarpShape = WarpShape_;
+  using InstructionShape = InstructionShape_;
+  using ElementA = double;
+  using LayoutA = layout::ColumnMajor;
+  using ElementB = double;
+  using LayoutB = layout::ColumnMajor;
+  using ElementC = double;
+  using LayoutC = LayoutC_;
+  static int const kStages = Stages;
+  static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
+  static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
+
+  /// Number of warps present
+  using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
+                              Shape::kN / WarpShape::kN,
+                              Shape::kK / WarpShape::kK>;
+
+  // Divisility requirements
+  static_assert(
+      !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
+      "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
+
+  static_assert(WarpCount::kCount > 1,
+      "This specialization requires at least two warps.");
+
+  /// Number of threads per warp
+  static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
+
+  /// Number of threads total
+  static int const kThreads = WarpCount::kCount * kWarpSize;
+
+  /// Size of a threadblock-scoped access
+  static int const kAccessSizeInBits = 64;
+
+  /// Default Operator
+  using Operator = Operator_;
+
+  //
+  // Shared memory layouts
+  //
+
+  using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous64b;
+
+  using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicand64bCrosswise;
+
+  //
+  // Iterators to
write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearWarpStripedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape<16, 2>, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementA, SmemLayoutA, 1, + IteratorThreadMapA>; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator B + using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape<16, 2>, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementB, SmemLayoutB, 0, + IteratorThreadMapB>; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level tensor op + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, + ElementC, LayoutC, Operator, WarpCount::kK>::Type; + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy, + MatrixShape<0, 0>, WarpCount::kK>; +}; + +/// Partial specialization for double-precision +/// +/// A: column-major +/// B: row-major +/// Operator: tensor op class +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Layout of accumulator + typename LayoutC_, + /// Number of stages + int Stages, + /// Operation performed by MMA + typename Operator_, + /// Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB> +struct DefaultMmaCore { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using ElementA = double; + using LayoutA = layout::ColumnMajor; + using ElementB = double; + using LayoutB = layout::RowMajor; + using ElementC = double; + using LayoutC = LayoutC_; + static int const kStages = Stages; + static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; + + /// Number of warps present + using WarpCount = GemmShape; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); + + static_assert(WarpCount::kCount > 1, + "This specialization requires at least two warps."); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Size of a threadblock-scoped access + static int const kAccessSizeInBits = 64; + + /// Default Operator + using Operator = Operator_; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous64b; + + // Shared memory layout + using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous64b; + + // 
+ // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearWarpStripedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape<16, 2>, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementA, SmemLayoutA, 1, + IteratorThreadMapA>; + + /// ThreadMap of iterator B + using IteratorThreadMapB = transform::PitchLinearWarpStripedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape<16, 2>, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementB, SmemLayoutB, 0, + IteratorThreadMapB>; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level tensor op + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, + ElementC, LayoutC, Operator, WarpCount::kK>::Type; + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy, + MatrixShape<0, 0>, WarpCount::kK>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for double-precision +/// +/// A: row-major +/// B: column-major +/// Operator: tensor op class +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Layout of accumulator + typename LayoutC_, + /// Number of stages + int Stages, + /// Operation performed by MMA + typename Operator_, + /// Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB> +struct DefaultMmaCore { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using ElementA = double; + using LayoutA = layout::RowMajor; + using ElementB = double; + using LayoutB = layout::ColumnMajor; + using ElementC = double; + using LayoutC = LayoutC_; + static int const kStages = Stages; + static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; + + /// Number of warps present + using WarpCount = GemmShape; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Size of a threadblock-scoped access + static int const kAccessSizeInBits = 64; + + /// Default Operator + using Operator = Operator_; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::RowMajorTensorOpMultiplicand64bCrosswise; + + using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicand64bCrosswise; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of 
iterator A + using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape<16, 2>, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementA, SmemLayoutA, 1, + IteratorThreadMapA>; + + /// ThreadMap of iterator B + using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape<16, 2>, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementB, SmemLayoutB, 0, + IteratorThreadMapB>; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level tensor op + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, + ElementC, LayoutC, Operator, WarpCount::kK>::Type; + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy, + MatrixShape<0, 0>, WarpCount::kK>; +}; + +//////////////////////////////////////////////////////////////////////////////// +/// +/// Partial specialization for double-precision +/// +/// A: row-major +/// B: row-major +/// Operator: tensor op class +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Layout of accumulator + typename LayoutC_, + /// Number of stages + int Stages, + /// Operation performed by MMA + typename Operator_, + /// Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB> +struct DefaultMmaCore { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using ElementA = double; + using LayoutA = layout::RowMajor; + using ElementB = double; + using LayoutB = layout::RowMajor; + using ElementC = double; + using LayoutC = LayoutC_; + static int const kStages = Stages; + static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; + + /// Number of warps present + using WarpCount = GemmShape; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); + + static_assert(WarpCount::kCount > 1, + "This specialization requires at least two warps."); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Size of a threadblock-scoped access + static int const kAccessSizeInBits = 64; + + /// Default Operator + using Operator = Operator_; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::RowMajorTensorOpMultiplicand64bCrosswise; + + using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous64b; + + + // + // Iterators to write to shared memory + // + + 
/// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape<16, 2>, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementA, SmemLayoutA, 1, + IteratorThreadMapA>; + + /// ThreadMap of iterator B + using IteratorThreadMapB = transform::PitchLinearWarpStripedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape<16, 2>, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementB, SmemLayoutB, 0, + IteratorThreadMapB>; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level tensor op + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, + ElementC, LayoutC, Operator, WarpCount::kK>::Type; + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy, + MatrixShape<0, 0>, WarpCount::kK>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for double-precision +/// +/// A: column-major +/// B: column-major +/// Operator: tensor op class +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Layout of accumulator + typename LayoutC_, + /// Number of stages + int Stages, + /// Operation performed by MMA + typename Operator_, + /// Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB> +struct DefaultMmaCore { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using ElementA = double; + using LayoutA = layout::AffineRank2ColumnMajor; + using ElementB = double; + using LayoutB = layout::AffineRank2ColumnMajor; + using ElementC = double; + using LayoutC = LayoutC_; + static int const kStages = Stages; + static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; + + /// Default Operator + using Operator = Operator_; + + using Base = DefaultMmaCore; + + // + // Shared memory layouts + // + + using SmemLayoutA = typename Base::SmemLayoutA; + using SmemLayoutB = typename Base::SmemLayoutB; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = typename Base::IteratorThreadMapA; + + /// Shared memory iterator to A operand + using SmemIteratorA = typename Base::SmemIteratorA; + + /// Policy of iterator B + using IteratorThreadMapB = typename Base::IteratorThreadMapB; + + /// Shared memory iterator to B operand + using SmemIteratorB = typename Base::SmemIteratorB; + + // + // Warp-level matrix multiply operator + // + + /// Policy used to define MmaPipelined + using MmaPolicy = typename Base::MmaPolicy; +}; + +/// Partial specialization for double-precision 
+/// +/// A: column-major +/// B: row-major +/// Operator: tensor op class +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Layout of accumulator + typename LayoutC_, + /// Number of stages + int Stages, + /// Operation performed by MMA + typename Operator_, + /// Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB> +struct DefaultMmaCore { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using ElementA = double; + using LayoutA = layout::AffineRank2ColumnMajor; + using ElementB = double; + using LayoutB = layout::AffineRank2RowMajor; + using ElementC = double; + using LayoutC = LayoutC_; + static int const kStages = Stages; + static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; + + /// Default Operator + using Operator = Operator_; + + using Base = DefaultMmaCore; + + // + // Shared memory layouts + // + + using SmemLayoutA = typename Base::SmemLayoutA; + using SmemLayoutB = typename Base::SmemLayoutB; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = typename Base::IteratorThreadMapA; + + /// Shared memory iterator to A operand + using SmemIteratorA = typename Base::SmemIteratorA; + + /// Policy of iterator B + using IteratorThreadMapB = typename Base::IteratorThreadMapB; + + /// Shared memory iterator to B operand + using SmemIteratorB = typename Base::SmemIteratorB; + + // + // Warp-level matrix multiply operator + // + + /// Policy used to define MmaPipelined + using MmaPolicy = typename Base::MmaPolicy; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for double-precision +/// +/// A: row-major +/// B: column-major +/// Operator: tensor op class +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Layout of accumulator + typename LayoutC_, + /// Number of stages + int Stages, + /// Operation performed by MMA + typename Operator_, + /// Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB> +struct DefaultMmaCore { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using ElementA = double; + using LayoutA = layout::AffineRank2RowMajor; + using ElementB = double; + using LayoutB = layout::AffineRank2ColumnMajor; + using ElementC = double; + using LayoutC = LayoutC_; + static int const kStages = Stages; + static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; + static 
cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; + + /// Default Operator + using Operator = Operator_; + + using Base = DefaultMmaCore; + + // + // Shared memory layouts + // + + using SmemLayoutA = typename Base::SmemLayoutA; + using SmemLayoutB = typename Base::SmemLayoutB; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = typename Base::IteratorThreadMapA; + + /// Shared memory iterator to A operand + using SmemIteratorA = typename Base::SmemIteratorA; + + /// Policy of iterator B + using IteratorThreadMapB = typename Base::IteratorThreadMapB; + + /// Shared memory iterator to B operand + using SmemIteratorB = typename Base::SmemIteratorB; + + // + // Warp-level matrix multiply operator + // + + /// Policy used to define MmaPipelined + using MmaPolicy = typename Base::MmaPolicy; +}; + +//////////////////////////////////////////////////////////////////////////////// +/// +/// Partial specialization for double-precision +/// +/// A: row-major +/// B: row-major +/// Operator: tensor op class +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Layout of accumulator + typename LayoutC_, + /// Number of stages + int Stages, + /// Operation performed by MMA + typename Operator_, + /// Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB> +struct DefaultMmaCore { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using ElementA = double; + using LayoutA = layout::AffineRank2RowMajor; + using ElementB = double; + using LayoutB = layout::AffineRank2RowMajor; + using ElementC = double; + using LayoutC = LayoutC_; + static int const kStages = Stages; + static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; + + /// Default Operator + using Operator = Operator_; + + using Base = DefaultMmaCore; + + // + // Shared memory layouts + // + + using SmemLayoutA = typename Base::SmemLayoutA; + using SmemLayoutB = typename Base::SmemLayoutB; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = typename Base::IteratorThreadMapA; + + /// Shared memory iterator to A operand + using SmemIteratorA = typename Base::SmemIteratorA; + + /// Policy of iterator B + using IteratorThreadMapB = typename Base::IteratorThreadMapB; + + /// Shared memory iterator to B operand + using SmemIteratorB = typename Base::SmemIteratorB; + + // + // Warp-level matrix multiply operator + // + + /// Policy used to define MmaPipelined + using MmaPolicy = typename Base::MmaPolicy; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for float-precision +/// +/// ElementA: complex +/// ElementB: complex +/// ElementC: complex +/// Operator: tensor op class +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix 
multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Layout for A operand + typename LayoutA_, + /// Layout for B operand + typename LayoutB_, + /// Layout of accumulator + typename LayoutC_, + /// Number of stages + int Stages, + /// Operation performed by MMA + typename Operator_, + /// Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB, + /// per-element transformation for elements of A + ComplexTransform TransformA_, + /// per-element transformation for elements of B + ComplexTransform TransformB_ + > +struct DefaultMmaCore< + Shape_, WarpShape_, GemmShape<16, 8, 8>, + complex, LayoutA_, + complex, LayoutB_, + complex, LayoutC_, + arch::OpClassTensorOp, + Stages, + Operator_, + false, + CacheOpA, + CacheOpB, + TransformA_, TransformB_, true> { + + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = GemmShape<16, 8, 8>; + using ElementA = complex; + using LayoutA = LayoutA_; + using ElementB = complex; + using LayoutB = LayoutB_; + using ElementC = complex; + using LayoutC = LayoutC_; + static int const kStages = Stages; + static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; + static const ComplexTransform TransformA = TransformA_; + static const ComplexTransform TransformB = TransformB_; + + /// Number of warps present + using WarpCount = GemmShape; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); + + static_assert(WarpCount::kCount > 1, + "This specialization requires at least two warps."); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Size of a threadblock-scoped access + static int const kAccessSizeInBits = 128; + + /// Default Operator + using Operator = Operator_; + + static_assert( + platform::is_same::value || + platform::is_same::value || + platform::is_same::value, + "The operator tag must indicate complex multiplication."); + + // + // Underlying template + // + + using MmaComplexCore = DefaultMultistageMmaComplexCore< + Shape, WarpShape, InstructionShape, + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + arch::OpClassTensorOp, + kStages, + TransformA, + TransformB, + Operator, + kCacheOpA, + kCacheOpB + >; + + // + // Shared memory layouts + // + + using SmemLayoutA = typename MmaComplexCore::SmemLayoutA; + + // Shared memory layout + using SmemLayoutB = typename MmaComplexCore::SmemLayoutB; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = typename MmaComplexCore::IteratorThreadMapA; + + /// Shared memory iterator to A operand + using SmemIteratorA = typename MmaComplexCore::SmemIteratorA; + + /// ThreadMap of iterator B + using IteratorThreadMapB = typename MmaComplexCore::IteratorThreadMapB; + + /// Shared memory iterator to B operand + using SmemIteratorB = typename MmaComplexCore::SmemIteratorB; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level tensor op + using MmaTensorOp = typename 
MmaComplexCore::MmaTensorOp; + + /// Policy used to define MmaPipelined + using MmaPolicy = typename MmaComplexCore::MmaPolicy; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for double-precision +/// +/// ElementA: complex +/// ElementB: complex +/// ElementC: complex +/// Operator: tensor op class +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Layout for A operand + typename LayoutA_, + /// Layout for B operand + typename LayoutB_, + /// Layout of accumulator + typename LayoutC_, + /// Number of stages + int Stages, + /// Operation performed by MMA + typename Operator_, + /// Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB, + /// per-element transformation for elements of A + ComplexTransform TransformA_, + /// per-element transformation for elements of B + ComplexTransform TransformB_ + > +struct DefaultMmaCore< + Shape_, WarpShape_, InstructionShape_, + complex, LayoutA_, + complex, LayoutB_, + complex, LayoutC_, + arch::OpClassTensorOp, + Stages, + Operator_, + false, + CacheOpA, + CacheOpB, + TransformA_, TransformB_, true> { + + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using ElementA = complex; + using LayoutA = LayoutA_; + using ElementB = complex; + using LayoutB = LayoutB_; + using ElementC = complex; + using LayoutC = LayoutC_; + static int const kStages = Stages; + static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; + static const ComplexTransform TransformA = TransformA_; + static const ComplexTransform TransformB = TransformB_; + + /// Number of warps present + using WarpCount = GemmShape; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); + + static_assert(WarpCount::kCount > 1, + "This specialization requires at least two warps."); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Size of a threadblock-scoped access + static int const kAccessSizeInBits = 64; + + /// Default Operator + using Operator = Operator_; + + static_assert( + platform::is_same::value || + platform::is_same::value, + "The operator tag must indicate complex multiplication."); + + // + // Underlying template + // + + using MmaComplexCore = DefaultMultistageMmaComplexCore< + Shape, WarpShape, InstructionShape, + ElementA, LayoutA, + ElementB, LayoutB, + ElementC, LayoutC, + arch::OpClassTensorOp, + kStages, + TransformA, + TransformB, + Operator, + kCacheOpA, + kCacheOpB + >; + + // + // Shared memory layouts + // + + using SmemLayoutA = typename MmaComplexCore::SmemLayoutA; + + // Shared memory layout + using SmemLayoutB = typename MmaComplexCore::SmemLayoutB; + + // + // Iterators to write to shared memory + // + 
+ /// ThreadMap of iterator A + using IteratorThreadMapA = typename MmaComplexCore::IteratorThreadMapA; + + /// Shared memory iterator to A operand + using SmemIteratorA = typename MmaComplexCore::SmemIteratorA; + + /// ThreadMap of iterator B + using IteratorThreadMapB = typename MmaComplexCore::IteratorThreadMapB; + + /// Shared memory iterator to B operand + using SmemIteratorB = typename MmaComplexCore::SmemIteratorB; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level tensor op + using MmaTensorOp = typename MmaComplexCore::MmaTensorOp; + + /// Policy used to define MmaPipelined + using MmaPolicy = typename MmaComplexCore::MmaPolicy; +}; + +//////////////////////////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization: +/// +/// A: column-major +/// B: row-major +/// Operator: tensor op class +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Data type of A operand + typename ElementA_, + /// Data type of B operand + typename ElementB_, + /// Data type of accumulator + typename ElementC_, + /// Layout of accumulator + typename LayoutC_, + /// Number of stages + int Stages, + /// Operation performed by MMA + typename Operator_, + /// Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB> +struct DefaultMmaCore { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using ElementA = ElementA_; + using LayoutA = layout::ColumnMajor; + using ElementB = ElementB_; + using LayoutB = layout::RowMajor; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + static int const kStages = Stages; + static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; + + /// Number of warps present + using WarpCount = GemmShape; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Size of a threadblock-scoped access + static int const kAccessSizeInBits = 128; + + /// Default Operator + using Operator = Operator_; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous< + sizeof_bits::value, int(128 / sizeof(ElementA))>; + + // Shared memory layout + using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous< + sizeof_bits::value, int(128 / sizeof(ElementB))>; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape<8, 4>, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to A operand + using SmemIteratorA = 
transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementA, SmemLayoutA, 1, + IteratorThreadMapA>; + + /// ThreadMap of iterator B + using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape<8, 4>, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementB, SmemLayoutB, 0, + IteratorThreadMapB>; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level tensor op + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, + ElementC, LayoutC, Operator, WarpCount::kK>::Type; + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy, + MatrixShape<0, 0>, WarpCount::kK>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization: +/// +/// A: row-major +/// B: column-major +/// Operator: tensor op class +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Data type of A operand + typename ElementA_, + /// Data type of B operand + typename ElementB_, + /// Data type of accumulator + typename ElementC_, + /// Layout of accumulator + typename LayoutC_, + /// Number of stages + int Stages, + /// Operation performed by MMA + typename Operator_, + /// Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB> +struct DefaultMmaCore { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using ElementA = ElementA_; + using LayoutA = layout::RowMajor; + using ElementB = ElementB_; + using LayoutB = layout::ColumnMajor; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + static int const kStages = Stages; + static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; + + /// Number of warps present + using WarpCount = GemmShape; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Size of a threadblock-scoped access + static int const kAccessSizeInBits = 128; + + /// Default Operator + using Operator = Operator_; + + // Warp thread arrangement + static int const kWarpThreadArrangementContiguousA = + Shape::kK / (kAccessSizeInBits / sizeof_bits::value); + + static int const kWarpThreadArrangementStridedA = + kWarpSize / kWarpThreadArrangementContiguousA; + + static int const kWarpThreadArrangementContiguousB = + Shape::kK / (kAccessSizeInBits / sizeof_bits::value); + + static int const kWarpThreadArrangementStridedB = + kWarpSize / kWarpThreadArrangementContiguousB; + + // + // Shared memory layouts + // + + using 
SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise< + sizeof_bits::value, Shape::kK>; + + // Shared memory layout + using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise< + sizeof_bits::value, Shape::kK>; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementA, SmemLayoutA, 0, + IteratorThreadMapA>; + + /// ThreadMap of iterator B + using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementB, SmemLayoutB, 1, + IteratorThreadMapB>; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level tensor op + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, + ElementC, LayoutC, Operator, WarpCount::kK>::Type; + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy, + MatrixShape<0, 0>, WarpCount::kK>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization: +/// +/// A: column-major +/// B: column-major +/// Operator: tensor op class +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Data type of A operand + typename ElementA_, + /// Data type of B operand + typename ElementB_, + /// Data type of accumulator + typename ElementC_, + /// Layout of accumulator + typename LayoutC_, + /// Number of stages + int Stages, + /// Operation performed by MMA + typename Operator_, + /// Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB> +struct DefaultMmaCore { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using ElementA = ElementA_; + + using LayoutA = layout::ColumnMajor; + using ElementB = ElementB_; + using LayoutB = layout::ColumnMajor; + + using ElementC = ElementC_; + using LayoutC = LayoutC_; + static int const kStages = Stages; + static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; + + /// Number of warps present + using WarpCount = GemmShape; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Size of a threadblock-scoped access + static int const kAccessSizeInBits = 128; + + /// 
Default Operator + using Operator = Operator_; + + // Warp thread arrangement + static int const kWarpThreadArrangementContiguousB = + Shape::kK / (kAccessSizeInBits / sizeof_bits::value); + + static int const kWarpThreadArrangementStridedB = + kWarpSize / kWarpThreadArrangementContiguousB; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous< + sizeof_bits::value, int(128 / sizeof(ElementA))>; + + // Shared memory layout + using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise< + sizeof_bits::value, Shape::kK>; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape<8, 4>, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementA, SmemLayoutA, 1, + IteratorThreadMapA>; + + /// ThreadMap of iterator B + using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementB, SmemLayoutB, 1, + IteratorThreadMapB>; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level tensor op + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, + ElementC, LayoutC, Operator, WarpCount::kK>::Type; + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy, + MatrixShape<0, 0>, WarpCount::kK>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization: +/// +/// A: row-major +/// B: row-major +/// Operator: tensor op class +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Data type of A operand + typename ElementA_, + /// Data type of B operand + typename ElementB_, + /// Data type of accumulator + typename ElementC_, + /// Layout of accumulator + typename LayoutC_, + /// Number of stages + int Stages, + /// Operation performed by MMA + typename Operator_, + /// Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB> +struct DefaultMmaCore { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using ElementA = ElementA_; + using LayoutA = layout::RowMajor; + using ElementB = ElementB_; + using LayoutB = layout::RowMajor; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + static int const kStages = Stages; + static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; + + /// Number of warps present + using WarpCount = GemmShape; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), + 
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Size of a threadblock-scoped access + static int const kAccessSizeInBits = 128; + + /// Default Operator + using Operator = Operator_; + + // Warp thread arrangement + static int const kWarpThreadArrangementContiguousA = + Shape::kK / (kAccessSizeInBits / sizeof_bits::value); + + static int const kWarpThreadArrangementStridedA = + kWarpSize / kWarpThreadArrangementContiguousA; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise< + sizeof_bits::value, Shape::kK>; + + // Shared memory layout + using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous< + sizeof_bits::value, int(128 / sizeof(ElementB))>; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementA, SmemLayoutA, 0, + IteratorThreadMapA>; + + /// ThreadMap of iterator B + using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape<8, 4>, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementB, SmemLayoutB, 0, + IteratorThreadMapB>; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level tensor op + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, + ElementC, LayoutC, Operator, WarpCount::kK>::Type; + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy, + MatrixShape<0, 0>, WarpCount::kK>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization: +/// +/// A: column-major-interleaved +/// B: row-major-interleaved +/// Operator: tensor op class +/// +/// This uses the default warp-level operator given tile sizes +/// +/// Column/RowMajorInterleved(m, n) is mapped to Column/RowMajor(m +/// x InterleavedK, n / InterleavedK) so that Column/RowMajor global iterators +/// can be reused. The shared store iterator is the same as the crosswise shared +/// store iterator. So, the only thing we need to do is to swap the coordinates +/// (contiguous <=> strided) used by the global iterator and the shared store +/// iterator. +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Data type of A operand + typename ElementA_, + /// Data type of B operand + typename ElementB_, + /// Data type of accumulator + typename ElementC_, + /// Layout of accumulator + typename LayoutC_, + /// Number of stages + int Stages, + /// Operation performed by MMA + typename Operator_, + /// Store the accumulators in row major or column major. 
Row major is used + /// when output layout is interleaved. + bool AccumulatorsInRowMajor, + /// Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB, + /// Number of interleaved K + int InterleavedK> +struct DefaultMmaCore, ElementB_, + layout::RowMajorInterleaved, ElementC_, + LayoutC_, arch::OpClassTensorOp, Stages, Operator_, + AccumulatorsInRowMajor, CacheOpA, CacheOpB> { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using ElementA = ElementA_; + using LayoutA = layout::ColumnMajorInterleaved; + using ElementB = ElementB_; + using LayoutB = layout::RowMajorInterleaved; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + static int const kStages = Stages; + static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; + static int const kInterleavedK = InterleavedK; + + /// Number of warps present + using WarpCount = GemmShape; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Size of a threadblock-scoped access + static int const kAccessSizeInBits = 128; + + /// Default Operator + using Operator = Operator_; + + // Warp thread arrangement + static int const kElementsPerAccess = + kAccessSizeInBits / sizeof_bits::value; + + static int const kWarpThreadArrangementContiguous = + kInterleavedK / kElementsPerAccess; + + static int const kWarpThreadArrangementStrided = + kWarpSize / kWarpThreadArrangementContiguous; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise< + sizeof_bits::value, kInterleavedK>; + + // Shared memory layout + using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise< + sizeof_bits::value, kInterleavedK>; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, + kThreads, layout::PitchLinearShape<32, 1>, kElementsPerAccess>; + + /// Transpose the ThreadMap of iterator A + using SmemThreadMapA = transform::TransposePitchLinearThreadMap< + IteratorThreadMapA, + layout::PitchLinearShape>; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementA, SmemLayoutA, 0, + SmemThreadMapA>; + + /// ThreadMap of iterator B + using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, + kThreads, layout::PitchLinearShape<32, 1>, kElementsPerAccess>; + + /// Transpose the ThreadMap of iterator A + using SmemThreadMapB = transform::TransposePitchLinearThreadMap< + IteratorThreadMapB, + layout::PitchLinearShape>; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementB, SmemLayoutB, 1, + SmemThreadMapB>; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level tensor op + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< + WarpShape, InstructionShape, ElementA, SmemLayoutA, 
ElementB, SmemLayoutB, + ElementC, LayoutC, Operator, WarpCount::kK, AccumulatorsInRowMajor>::Type; + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy, + MatrixShape<0, 0>, WarpCount::kK>; +}; + +//////////////////////////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for SIMT GEMMs using multistage pipeline. +/// +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Data type of A operand + typename ElementA_, + /// Data type of B operand + typename ElementB_, + /// Data type of accumulator + typename ElementC_, + /// Layout of accumulator + typename LayoutC_, + /// Number of stages + int Stages, + /// Operation performed by Simt + typename Operator_, + /// Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB> +struct DefaultMmaCore { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using ElementA = ElementA_; + using LayoutA = layout::ColumnMajor; + using ElementB = ElementB_; + using LayoutB = layout::ColumnMajor; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + static int const kStages = Stages; + static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; + + /// Number of warps present + using WarpCount = GemmShape; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Default Operator + using Operator = Operator_; + + // Warp thread arrangement + static int const kElementsPerAccess = 1; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::ColumnMajor; + + // Shared memory layout + using SmemLayoutB = layout::RowMajor; + + // + // Iterators to write to shared memory + // + + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap< + layout::PitchLinearShape, + kThreads, + kElementsPerAccess + >; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementA, SmemLayoutA, 0, + IteratorThreadMapA>; + + /// Policy of iterator B + using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap< + layout::PitchLinearShape, + kThreads, + kElementsPerAccess + >; + + /// Transpose the ThreadMap of iterator B + using SmemThreadMapB = transform::TransposePitchLinearThreadMapSimt; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementB, SmemLayoutB, 1, + SmemThreadMapB>; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level op + static const int 
WarpNumThreadsM = 4; + static const int WarpNumThreadsN = 8; + static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN), + "WarpShape must be divisible by ThreadTile shape."); + static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM; + static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN; + static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1; + static const int numElementsA = 128 / sizeof_bits::value; + static const int numElementsB = 128 / sizeof_bits::value; + static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM); + static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN); + + static_assert(!((Shape::kK / 32) % LaneN), + "Padding must be divisible by Lane"); + + // these should have max of thread tile also + using LaneMmaShape = cutlass::gemm::GemmShape< + LaneM, + LaneN, + 1>; + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape, // WarpShape + cutlass::layout::RowMajorInterleaved, // LaneLayout + LaneMmaShape + >; + + using MmaWarpSimt = cutlass::gemm::warp::MmaSimt< + WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8 + ElementA, /// Data type of A elements + SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout) + ElementB, /// Data type of B elements + SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout) + ElementC, /// Element type of C matrix + LayoutC, /// Layout of C matrix (concept: MatrixLayout) + Policy /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) + >; /// Used for partial specialization + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy< + MmaWarpSimt, + MatrixShape<0, 0>, + MatrixShape<0, Shape::kK / 32>, + WarpCount::kK>; +}; + +/// Partial specialization for SIMT GEMMs using multistage pipeline. 
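+// Illustrative worked example of the lane partitioning above (hedged; the
+// warp tile and element type are assumptions made only for this example).
+// Assuming WarpShape = GemmShape<64, 64, 8> and float operands
+// (sizeof_bits = 32):
+//
+//   ThreadTileM  = 64 / WarpNumThreadsM = 64 / 4 = 16
+//   ThreadTileN  = 64 / WarpNumThreadsN = 64 / 8 = 8
+//   numElementsA = numElementsB = 128 / 32 = 4
+//   LaneM = min(4, 16) = 4,  LaneN = min(4, 8) = 4
+//   LaneLayout   = (16 > 4 && 8 > 4) ? 2 : 1 = 2
+//
+// so LaneMmaShape becomes GemmShape<4, 4, 1> and the warp-level policy is
+// MmaSimtPolicy<MatrixShape<4, 8>, layout::RowMajorInterleaved<2>,
+// GemmShape<4, 4, 1>>.
+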
+/// +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Data type of A operand + typename ElementA_, + /// Data type of B operand + typename ElementB_, + /// Data type of accumulator + typename ElementC_, + /// Layout of accumulator + typename LayoutC_, + /// Number of stages + int Stages, + /// Operation performed by Simt + typename Operator_, + /// Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB> +struct DefaultMmaCore { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using ElementA = ElementA_; + using LayoutA = layout::ColumnMajor; + using ElementB = ElementB_; + using LayoutB = layout::RowMajor; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + static int const kStages = Stages; + static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; + + /// Number of warps present + using WarpCount = GemmShape; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Default Operator + using Operator = Operator_; + + // Warp thread arrangement + static int const kElementsPerAccess = 1; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::ColumnMajor; + + // Shared memory layout + using SmemLayoutB = layout::RowMajor; + + // + // Iterators to write to shared memory + // + + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap< + layout::PitchLinearShape, + kThreads, + kElementsPerAccess + >; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementA, SmemLayoutA, 0, + IteratorThreadMapA>; + + /// Policy of iterator B + using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap< + layout::PitchLinearShape, + kThreads, + kElementsPerAccess + >; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementB, SmemLayoutB, 1, + IteratorThreadMapB>; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level op + static const int WarpNumThreadsM = 4; + static const int WarpNumThreadsN = 8; + static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN), + "WarpShape must be divisible by ThreadTile shape."); + static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM; + static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN; + static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 
2 : 1; + static const int numElementsA = 128 / sizeof_bits::value; + static const int numElementsB = 128 / sizeof_bits::value; + static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM); + static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN); + // these should have max of thread tile also + using LaneMmaShape = cutlass::gemm::GemmShape< + LaneM, + LaneN, + 1>; + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape, // WarpShape + cutlass::layout::RowMajorInterleaved, // LaneLayout + LaneMmaShape + >; + + using MmaWarpSimt = cutlass::gemm::warp::MmaSimt< + WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8 + ElementA, /// Data type of A elements + SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout) + ElementB, /// Data type of B elements + SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout) + ElementC, /// Element type of C matrix + LayoutC, /// Layout of C matrix (concept: MatrixLayout) + Policy /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) + >; /// Used for partial specialization + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy< + MmaWarpSimt, + MatrixShape<0, 0>, + MatrixShape<0, 0>, + WarpCount::kK>; +}; + +/// Partial specialization for SIMT GEMMs using multistage pipeline. +/// +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Data type of A operand + typename ElementA_, + /// Data type of B operand + typename ElementB_, + /// Data type of accumulator + typename ElementC_, + /// Layout of accumulator + typename LayoutC_, + /// Number of stages + int Stages, + /// Operation performed by Simt + typename Operator_, + /// Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB> +struct DefaultMmaCore { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using ElementA = ElementA_; + using LayoutA = layout::RowMajor; + using ElementB = ElementB_; + using LayoutB = layout::ColumnMajor; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + static int const kStages = Stages; + static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; + + /// Number of warps present + using WarpCount = GemmShape; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Default Operator + using Operator = Operator_; + + // Warp thread arrangement + static int const kElementsPerAccess = 1; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::ColumnMajor; + + // Shared memory layout + using SmemLayoutB = layout::RowMajor; + + // + // Iterators to write to shared memory + // + + + /// ThreadMap of 
iterator A + using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap< + layout::PitchLinearShape, + kThreads, + kElementsPerAccess + >; + + /// Transpose the ThreadMap of iterator A + using SmemThreadMapA = transform::TransposePitchLinearThreadMapSimt; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementA, SmemLayoutA, 0, + SmemThreadMapA>; + + /// Policy of iterator B + using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap< + layout::PitchLinearShape, + kThreads, + kElementsPerAccess + >; + + /// Transpose the ThreadMap of iterator B + using SmemThreadMapB = transform::TransposePitchLinearThreadMapSimt; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementB, SmemLayoutB, 1, + SmemThreadMapB>; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level op + static const int WarpNumThreadsM = 4; + static const int WarpNumThreadsN = 8; + static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN), + "WarpShape must be divisible by ThreadTile shape."); + static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM; + static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN; + static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1; + static const int numElementsA = 128 / sizeof_bits::value; + static const int numElementsB = 128 / sizeof_bits::value; + static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM); + static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN); + + static_assert(!((Shape::kK / 32) % LaneM) && !((Shape::kK / 32) % LaneN), + "Padding must be divisible by Lane"); + + // these should have max of thread tile also + using LaneMmaShape = cutlass::gemm::GemmShape< + LaneM, + LaneN, + 1>; + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape, // WarpShape + cutlass::layout::RowMajorInterleaved, // LaneLayout + LaneMmaShape + >; + + using MmaWarpSimt = cutlass::gemm::warp::MmaSimt< + WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8 + ElementA, /// Data type of A elements + SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout) + ElementB, /// Data type of B elements + SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout) + ElementC, /// Element type of C matrix + LayoutC, /// Layout of C matrix (concept: MatrixLayout) + Policy /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) + >; /// Used for partial specialization + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy< + MmaWarpSimt, + MatrixShape, + MatrixShape<0, Shape::kK / 32>, + WarpCount::kK>; +}; + +/// Partial specialization for SIMT GEMMs using multistage pipeline. 
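+// Illustrative note on the MmaPolicy arguments above (a hedged reading: the
+// first MatrixShape argument is presumed to be <Shape::kK / 32, 0>, which is
+// not legible in this file).  The two MatrixShape parameters specify
+// Shape::kK / 32 elements of shared-memory padding for the A and B tiles,
+// which is the quantity the "Padding must be divisible by Lane" static_assert
+// checks against LaneM and LaneN.  For an assumed threadblock tile
+// Shape = GemmShape<128, 128, 32>, that is one padding element, i.e. roughly:
+//
+//   using MmaPolicy = MmaPolicy<
+//       MmaWarpSimt,
+//       MatrixShape<1, 0>,   // A-tile smem padding: Shape::kK / 32 = 1 (assumed kK = 32)
+//       MatrixShape<0, 1>,   // B-tile smem padding: Shape::kK / 32 = 1 (assumed kK = 32)
+//       WarpCount::kK>;
+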
+/// +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Data type of A operand + typename ElementA_, + /// Data type of B operand + typename ElementB_, + /// Data type of accumulator + typename ElementC_, + /// Layout of accumulator + typename LayoutC_, + /// Number of stages + int Stages, + /// Operation performed by Simt + typename Operator_, + /// Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB> +struct DefaultMmaCore { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using ElementA = ElementA_; + using LayoutA = layout::RowMajor; + using ElementB = ElementB_; + using LayoutB = layout::RowMajor; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + static int const kStages = Stages; + static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; + + /// Number of warps present + using WarpCount = GemmShape; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Default Operator + using Operator = Operator_; + + // Warp thread arrangement + static int const kElementsPerAccess = 1; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::ColumnMajor; + + // Shared memory layout + using SmemLayoutB = layout::RowMajor; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap< + layout::PitchLinearShape, + kThreads, + kElementsPerAccess + >; + + /// Transpose the ThreadMap of iterator A + using SmemThreadMapA = transform::TransposePitchLinearThreadMapSimt; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementA, SmemLayoutA, 0, + SmemThreadMapA>; + + /// Policy of iterator B + using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap< + layout::PitchLinearShape, + kThreads, + kElementsPerAccess + >; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementB, SmemLayoutB, 1, + IteratorThreadMapB>; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level op + static const int WarpNumThreadsM = 4; + static const int WarpNumThreadsN = 8; + static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN), + "WarpShape must be divisible by ThreadTile shape."); + static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM; + static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN; + static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 
2 : 1; + static const int numElementsA = 128 / sizeof_bits::value; + static const int numElementsB = 128 / sizeof_bits::value; + static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM); + static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN); + + static_assert(!((Shape::kK / 32) % LaneM), + "Padding must be divisible by Lane"); + + // these should have max of thread tile also + using LaneMmaShape = cutlass::gemm::GemmShape< + LaneM, + LaneN, + 1>; + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape, // WarpShape + cutlass::layout::RowMajorInterleaved, // LaneLayout + LaneMmaShape + >; + + using MmaWarpSimt = cutlass::gemm::warp::MmaSimt< + WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8 + ElementA, /// Data type of A elements + SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout) + ElementB, /// Data type of B elements + SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout) + ElementC, /// Element type of C matrix + LayoutC, /// Layout of C matrix (concept: MatrixLayout) + Policy /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) + >; /// Used for partial specialization + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy< + MmaWarpSimt, + MatrixShape, + MatrixShape<0, 0>, + WarpCount::kK>; +}; + +/// Partial specialization for SIMT GEMMs using multistage pipeline. +/// +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Data type of A operand + typename ElementA_, + /// Data type of B operand + typename ElementB_, + /// Data type of accumulator + typename ElementC_, + /// Layout of accumulator + typename LayoutC_, + /// Number of stages + int Stages, + /// Operation performed by Simt + typename Operator_, + /// Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB> +struct DefaultMmaCore { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using ElementA = ElementA_; + using LayoutA = layout::AffineRank2ColumnMajor; + using ElementB = ElementB_; + using LayoutB = layout::AffineRank2RowMajor; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + static int const kStages = Stages; + static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; + + /// Default Operator + using Operator = Operator_; + + using Base = DefaultMmaCore; + + // + // Shared memory layouts + // + + using SmemLayoutA = typename Base::SmemLayoutA; + using SmemLayoutB = typename Base::SmemLayoutB; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = typename Base::IteratorThreadMapA; + + /// Shared memory iterator to A operand + using SmemIteratorA = typename Base::SmemIteratorA; + + /// Policy of iterator B + using IteratorThreadMapB = typename Base::IteratorThreadMapB; + + /// Shared memory iterator to B operand + using SmemIteratorB = typename Base::SmemIteratorB; + + // + // Warp-level 
matrix multiply operator + // + + /// Policy used to define MmaPipelined + using MmaPolicy = typename Base::MmaPolicy; +}; + +/// Partial specialization for SIMT GEMMs using multistage pipeline. +/// +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Data type of A operand + typename ElementA_, + /// Data type of B operand + typename ElementB_, + /// Data type of accumulator + typename ElementC_, + /// Layout of accumulator + typename LayoutC_, + /// Number of stages + int Stages, + /// Operation performed by Simt + typename Operator_, + /// Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB> +struct DefaultMmaCore { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using ElementA = ElementA_; + using LayoutA = layout::AffineRank2RowMajor; + using ElementB = ElementB_; + using LayoutB = layout::AffineRank2ColumnMajor; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + static int const kStages = Stages; + static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; + + /// Default Operator + using Operator = Operator_; + + using Base = DefaultMmaCore; + + // + // Shared memory layouts + // + + using SmemLayoutA = typename Base::SmemLayoutA; + using SmemLayoutB = typename Base::SmemLayoutB; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = typename Base::IteratorThreadMapA; + + /// Shared memory iterator to A operand + using SmemIteratorA = typename Base::SmemIteratorA; + + /// Policy of iterator B + using IteratorThreadMapB = typename Base::IteratorThreadMapB; + + /// Shared memory iterator to B operand + using SmemIteratorB = typename Base::SmemIteratorB; + + // + // Warp-level matrix multiply operator + // + + /// Policy used to define MmaPipelined + using MmaPolicy = typename Base::MmaPolicy; +}; + +/// Partial specialization for SIMT GEMMs using multistage pipeline. 
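+// Illustrative note on the affine rank-2 specializations (hedged; the
+// expansion of "using Base = DefaultMmaCore" shown here is a presumed
+// reconstruction, not text from this file).  Each affine specialization
+// forwards to the SIMT core for the corresponding canonical layouts, since
+// only the global-memory addressing changes for affine layouts while the
+// shared-memory side is identical.  For the AffineRank2RowMajor x
+// AffineRank2ColumnMajor case above, Base would read roughly:
+//
+//   using Base = DefaultMmaCore<Shape, WarpShape, InstructionShape,
+//                               ElementA, layout::RowMajor,
+//                               ElementB, layout::ColumnMajor,
+//                               ElementC, LayoutC,
+//                               arch::OpClassSimt,
+//                               Stages, Operator_>;
+//
+// The SmemLayoutA/B, iterator, and MmaPolicy aliases are then re-exported
+// from Base unchanged.
+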
+/// +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Data type of A operand + typename ElementA_, + /// Data type of B operand + typename ElementB_, + /// Data type of accumulator + typename ElementC_, + /// Layout of accumulator + typename LayoutC_, + /// Number of stages + int Stages, + /// Operation performed by Simt + typename Operator_, + /// Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB> +struct DefaultMmaCore { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using ElementA = ElementA_; + using LayoutA = layout::AffineRank2ColumnMajor; + using ElementB = ElementB_; + using LayoutB = layout::AffineRank2ColumnMajor; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + static int const kStages = Stages; + static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; + + /// Default Operator + using Operator = Operator_; + + using Base = DefaultMmaCore; + + // + // Shared memory layouts + // + + using SmemLayoutA = typename Base::SmemLayoutA; + using SmemLayoutB = typename Base::SmemLayoutB; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = typename Base::IteratorThreadMapA; + + /// Shared memory iterator to A operand + using SmemIteratorA = typename Base::SmemIteratorA; + + /// Policy of iterator B + using IteratorThreadMapB = typename Base::IteratorThreadMapB; + + /// Shared memory iterator to B operand + using SmemIteratorB = typename Base::SmemIteratorB; + + // + // Warp-level matrix multiply operator + // + + /// Policy used to define MmaPipelined + using MmaPolicy = typename Base::MmaPolicy; + +}; + +/// Partial specialization for SIMT GEMMs using multistage pipeline. 
+/// +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Data type of A operand + typename ElementA_, + /// Data type of B operand + typename ElementB_, + /// Data type of accumulator + typename ElementC_, + /// Layout of accumulator + typename LayoutC_, + /// Number of stages + int Stages, + /// Operation performed by Simt + typename Operator_, + /// Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB> +struct DefaultMmaCore { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using ElementA = ElementA_; + using LayoutA = layout::AffineRank2RowMajor; + using ElementB = ElementB_; + using LayoutB = layout::AffineRank2RowMajor; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + static int const kStages = Stages; + static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; + + /// Default Operator + using Operator = Operator_; + + using Base = DefaultMmaCore; + + // + // Shared memory layouts + // + + using SmemLayoutA = typename Base::SmemLayoutA; + using SmemLayoutB = typename Base::SmemLayoutB; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = typename Base::IteratorThreadMapA; + + /// Shared memory iterator to A operand + using SmemIteratorA = typename Base::SmemIteratorA; + + /// Policy of iterator B + using IteratorThreadMapB = typename Base::IteratorThreadMapB; + + /// Shared memory iterator to B operand + using SmemIteratorB = typename Base::SmemIteratorB; + + // + // Warp-level matrix multiply operator + // + + /// Policy used to define MmaPipelined + using MmaPolicy = typename Base::MmaPolicy; + +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_core_sparse_sm80.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_core_sparse_sm80.h new file mode 100644 index 0000000000000000000000000000000000000000..870845fd62d5d359cacd97d8d5bed5ed43c5fb1d --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_core_sparse_sm80.h @@ -0,0 +1,834 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief Defines basic properties needed by CTA-level GEMMs assuming + expectations about data layout of the global memory fragments, data types, + and internal tile sizes. + + Partial specializations for threadblock::Mma operations targeting sparse + TensorOp instructions. +*/ + +#pragma once + +#include "cutlass/array.h" +#include "cutlass/cutlass.h" + +#include "cutlass/layout/tensor_op_multiplicand_sm75.h" +#include "cutlass/layout/tensor_op_multiplicand_sm80.h" + +#include "cutlass/gemm/warp/mma_simt_policy.h" +#include "cutlass/gemm/warp/mma_simt.h" +#include "cutlass/gemm/warp/default_mma_sparse_tensor_op.h" +#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h" + +#include "cutlass/gemm/threadblock/default_mma_core.h" + +#include "cutlass/matrix_shape.h" +#include "cutlass/numeric_types.h" +#include "cutlass/transform/pitch_linear_thread_map.h" +#include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op.h" +#include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op_sm80.h" +#include "cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear.h" +#include "cutlass/gemm/threadblock/mma_sparse_multistage.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +/// Template defininng default matrix multiply operators inferred from threadblock tile size, +/// global memory data layout, and target math instruction. 
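+// Illustrative note (hedged): these sparse cores target 2:4 structured
+// sparsity.  kSparse = 2 in the specializations below means operand A is
+// stored compressed to Shape::kK / kSparse elements along K, while B stays
+// dense; the nonzero-selection metadata travels in an extra operand E (see
+// kMetaSizeInBits, kElementsPerElementE, GmemLayoutE, and SmemIteratorE).
+// A sketch of an instantiation follows; the tile and instruction shapes
+// marked "assumed" are illustrative values, not values taken from this file:
+//
+//   using SparseCore = cutlass::gemm::threadblock::DefaultSparseMmaCore<
+//       cutlass::gemm::GemmShape<128, 128, 64>,  // threadblock tile (assumed)
+//       cutlass::gemm::GemmShape<64, 64, 64>,    // warp tile (assumed)
+//       cutlass::gemm::GemmShape<16, 8, 32>,     // mma.sp instruction (assumed)
+//       cutlass::half_t, cutlass::layout::RowMajor,
+//       cutlass::half_t, cutlass::layout::ColumnMajor,
+//       float, cutlass::layout::RowMajor,
+//       cutlass::arch::OpClassTensorOp,
+//       /*Stages=*/3,
+//       cutlass::arch::OpMultiplyAdd>;
+//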
+template < + /// Shape of threadblock-scoped matrix multiply operator + typename Shape, + /// Shape of warp-level matrix multiply operator + typename WarpShape, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape, + /// Element data type of A operand + typename ElementA, + /// Layout of operand A + typename LayoutA, + /// Element data type of B operand + typename ElementB, + /// Layout of operand B + typename LayoutB, + /// Data type of accumulator + typename ElementC, + /// Layout of accumulator + typename LayoutC, + /// Indicates type of math operator (arch::OpClassSimt or arch::OpClassTensorOp) + typename OperatorClass, + /// Number of stages + int Stages, + /// Operation performed by MMA + typename Operator = typename platform::conditional< + (platform::is_same::value) && + (platform::is_same::value || + platform::is_same::value || + platform::is_same::value || + platform::is_same::value), + cutlass::arch::OpMultiplyAddSaturate, + cutlass::arch::OpMultiplyAdd>::type, + /// Store the accumulators in row major or column major. Row major is used + /// when output layout is interleaved. + bool AccumulatorsInRowMajor = false + /// Cache operation of operand A + , cutlass::arch::CacheOperation::Kind CacheOpA = + cutlass::arch::CacheOperation::Global, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB = + cutlass::arch::CacheOperation::Global +> +struct DefaultSparseMmaCore; + +//////////////////////////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization: +/// +/// A: column-major +/// B: row-major +/// Operator: tensor op class +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Data type of A operand + typename ElementA_, + /// Data type of B operand + typename ElementB_, + /// Data type of accumulator + typename ElementC_, + /// Layout of accumulator + typename LayoutC_, + /// Number of stages + int Stages, + /// Operation performed by MMA + typename Operator_, + /// Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB> +struct DefaultSparseMmaCore { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using ElementA = ElementA_; + using LayoutA = layout::ColumnMajor; + using ElementB = ElementB_; + using LayoutB = layout::RowMajor; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + static int const kStages = Stages; + static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; + + static int const kSparse = 2; + + /// Number of warps present + using WarpCount = GemmShape; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * 
kWarpSize; + + /// Size of a threadblock-scoped access + static int const kAccessSizeInBits = 128; + + /// Default Operator + using Operator = Operator_; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous< + sizeof_bits::value, int(128 / sizeof(ElementA))>; + + // Shared memory layout + using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous< + sizeof_bits::value, int(128 / sizeof(ElementB))>; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape<8, 4>, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementA, SmemLayoutA, 1, + IteratorThreadMapA>; + + /// ThreadMap of iterator B + using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape<8, 4>, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementB, SmemLayoutB, 0, + IteratorThreadMapB>; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level tensor op + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, + ElementC, LayoutC, Operator, WarpCount::kK>::Type; + + /// Cache operation of operand E + static cutlass::arch::CacheOperation::Kind const kCacheOpE = + cutlass::arch::CacheOperation::Global; + + static int const kInterleavedE = MmaTensorOp::kInterleaved; + static int const kMetaSizeInBits = MmaTensorOp::kMetaSizeInBits; + static int const kMaxID2 = MmaTensorOp::kMaxID2; + static int const kElementsPerElementE = MmaTensorOp::kElementsPerElementE; + + using ElementE = typename MmaTensorOp::ElementE; + using GmemLayoutE = cutlass::layout::ColumnMajorInterleaved; + + // Shared memory layout. Interleaved layout is mapped to PitchLinear layout. + using SmemLayoutE = typename MmaTensorOp::LayoutE; + + /// ThreadMap of iterator E + static int const kElementsPerAccessE = + kAccessSizeInBits / sizeof_bits::value; + + /// E is tiny. Not all warps are needed. + static int const kThreadsE = + (Shape::kM * Shape::kK / kSparse / kElementsPerElementE / + (kAccessSizeInBits / sizeof_bits::value) > + kThreads) + ? 
kThreads + : (Shape::kM * Shape::kK / kSparse / kElementsPerElementE / + (kAccessSizeInBits / sizeof_bits::value)); + + using IteratorThreadMapE = transform::PitchLinearStripminedThreadMap< + layout::PitchLinearShape, + kThreadsE, kElementsPerAccessE>; + + /// Shared memory iterator to E operand + using SmemIteratorE = transform::threadblock::RegularTileAccessIterator< + MatrixShape, + ElementE, SmemLayoutE, 0, IteratorThreadMapE>; + + /// Policy used to define MmaPipelined + using MmaPolicy = + SparseMmaPolicy, MatrixShape<0, 0>, + MatrixShape<0, 0>, WarpCount::kK>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization: +/// +/// A: row-major +/// B: column-major +/// Operator: tensor op class +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Data type of A operand + typename ElementA_, + /// Data type of B operand + typename ElementB_, + /// Data type of accumulator + typename ElementC_, + /// Layout of accumulator + typename LayoutC_, + /// Number of stages + int Stages, + /// Operation performed by MMA + typename Operator_, + /// Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB> +struct DefaultSparseMmaCore { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using ElementA = ElementA_; + using LayoutA = layout::RowMajor; + using ElementB = ElementB_; + using LayoutB = layout::ColumnMajor; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + static int const kStages = Stages; + static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; + + static int const kSparse = 2; + + /// Number of warps present + using WarpCount = GemmShape; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Size of a threadblock-scoped access + static int const kAccessSizeInBits = 128; + + /// Default Operator + using Operator = Operator_; + + // Warp thread arrangement + static int const kWarpThreadArrangementContiguousA = + Shape::kK / kSparse / (kAccessSizeInBits / sizeof_bits::value); + + static int const kWarpThreadArrangementStridedA = + kWarpSize / kWarpThreadArrangementContiguousA; + + // crosswise cannot be larger than 1024 bit. + static int const kCrosswiseB = + (Shape::kK > (1024 / sizeof_bits::value)) + ? 
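// The crosswise extent of the swizzled shared-memory layout for B is capped at
// 1024 bits. For example, with a 16-bit ElementB, 1024 / 16 = 64 elements: a
// threadblock K of 128 is clamped to kCrosswiseB = 64, while K = 32 keeps
// kCrosswiseB = 32. (Element width chosen for illustration only.)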
(1024 / sizeof_bits::value) + : Shape::kK; + + static int const kWarpThreadArrangementContiguousB = + kCrosswiseB / (kAccessSizeInBits / sizeof_bits::value); + + static int const kWarpThreadArrangementStridedB = + kWarpSize / kWarpThreadArrangementContiguousB; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise< + sizeof_bits::value, Shape::kK / kSparse>; + + // Shared memory layout + using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise< + sizeof_bits::value, kCrosswiseB>; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementA, SmemLayoutA, 0, + IteratorThreadMapA>; + + /// ThreadMap of iterator B + using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementB, SmemLayoutB, 1, + IteratorThreadMapB>; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level tensor op + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, + ElementC, LayoutC, Operator, WarpCount::kK>::Type; + + /// Cache operation of operand E + static cutlass::arch::CacheOperation::Kind const kCacheOpE = + cutlass::arch::CacheOperation::Global; + + static int const kInterleavedE = MmaTensorOp::kInterleaved; + static int const kMetaSizeInBits = MmaTensorOp::kMetaSizeInBits; + static int const kMaxID2 = MmaTensorOp::kMaxID2; + static int const kElementsPerElementE = MmaTensorOp::kElementsPerElementE; + + using ElementE = typename MmaTensorOp::ElementE; + using GmemLayoutE = cutlass::layout::ColumnMajorInterleaved; + + // Shared memory layout. Interleaved layout is mapped to PitchLinear layout. + using SmemLayoutE = typename MmaTensorOp::LayoutE; + + /// ThreadMap of iterator E + static int const kElementsPerAccessE = + kAccessSizeInBits / sizeof_bits::value; + + /// E is tiny. Not all warps are needed. + static int const kThreadsE = + (Shape::kM * Shape::kK / kSparse / kElementsPerElementE / + (kAccessSizeInBits / sizeof_bits::value) > + kThreads) + ? 
kThreads + : (Shape::kM * Shape::kK / kSparse / kElementsPerElementE / + (kAccessSizeInBits / sizeof_bits::value)); + + using IteratorThreadMapE = transform::PitchLinearStripminedThreadMap< + layout::PitchLinearShape, + kThreadsE, kElementsPerAccessE>; + + + /// Shared memory iterator to E operand + using SmemIteratorE = transform::threadblock::RegularTileAccessIterator< + MatrixShape, + ElementE, SmemLayoutE, 0, IteratorThreadMapE>; + + /// Policy used to define MmaPipelined + using MmaPolicy = + SparseMmaPolicy, MatrixShape<0, 0>, + MatrixShape<0, 0>, WarpCount::kK>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization: +/// +/// A: column-major +/// B: column-major +/// Operator: tensor op class +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Data type of A operand + typename ElementA_, + /// Data type of B operand + typename ElementB_, + /// Data type of accumulator + typename ElementC_, + /// Layout of accumulator + typename LayoutC_, + /// Number of stages + int Stages, + /// Operation performed by MMA + typename Operator_, + /// Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB> +struct DefaultSparseMmaCore { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using ElementA = ElementA_; + + using LayoutA = layout::ColumnMajor; + using ElementB = ElementB_; + using LayoutB = layout::ColumnMajor; + + using ElementC = ElementC_; + using LayoutC = LayoutC_; + static int const kStages = Stages; + static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; + + static int const kSparse = 2; + + /// Number of warps present + using WarpCount = GemmShape; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Size of a threadblock-scoped access + static int const kAccessSizeInBits = 128; + + /// Default Operator + using Operator = Operator_; + + // Warp thread arrangement + // crosswise cannot be larger than 1024 bit. + static int const kCrosswiseB = + (Shape::kK > (1024 / sizeof_bits::value)) + ? 
(1024 / sizeof_bits::value) + : Shape::kK; + + static int const kWarpThreadArrangementContiguousB = + kCrosswiseB / (kAccessSizeInBits / sizeof_bits::value); + + static int const kWarpThreadArrangementStridedB = + kWarpSize / kWarpThreadArrangementContiguousB; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous< + sizeof_bits::value, int(128 / sizeof(ElementA))>; + + // Shared memory layout + using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise< + sizeof_bits::value, kCrosswiseB>; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape<8, 4>, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementA, SmemLayoutA, 1, + IteratorThreadMapA>; + + /// ThreadMap of iterator B + using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementB, SmemLayoutB, 1, + IteratorThreadMapB>; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level tensor op + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, + ElementC, LayoutC, Operator, WarpCount::kK>::Type; + + /// Cache operation of operand E + static cutlass::arch::CacheOperation::Kind const kCacheOpE = + cutlass::arch::CacheOperation::Global; + + static int const kInterleavedE = MmaTensorOp::kInterleaved; + static int const kMetaSizeInBits = MmaTensorOp::kMetaSizeInBits; + static int const kMaxID2 = MmaTensorOp::kMaxID2; + static int const kElementsPerElementE = MmaTensorOp::kElementsPerElementE; + + using ElementE = typename MmaTensorOp::ElementE; + using GmemLayoutE = cutlass::layout::ColumnMajorInterleaved; + + // Shared memory layout. Interleaved layout is mapped to PitchLinear layout. + using SmemLayoutE = typename MmaTensorOp::LayoutE; + + /// ThreadMap of iterator E + static int const kElementsPerAccessE = + kAccessSizeInBits / sizeof_bits::value; + + /// E is tiny. Not all warps are needed. + static int const kThreadsE = + (Shape::kM * Shape::kK / kSparse / kElementsPerElementE / + (kAccessSizeInBits / sizeof_bits::value) > + kThreads) + ? 
kThreads + : (Shape::kM * Shape::kK / kSparse / kElementsPerElementE / + (kAccessSizeInBits / sizeof_bits::value)); + + using IteratorThreadMapE = transform::PitchLinearStripminedThreadMap< + layout::PitchLinearShape, + kThreadsE, kElementsPerAccessE>; + + /// Shared memory iterator to E operand + using SmemIteratorE = transform::threadblock::RegularTileAccessIterator< + MatrixShape, + ElementE, SmemLayoutE, 0, IteratorThreadMapE>; + + /// Policy used to define MmaPipelined + using MmaPolicy = + SparseMmaPolicy, MatrixShape<0, 0>, + MatrixShape<0, 0>, WarpCount::kK>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization: +/// +/// A: row-major +/// B: row-major +/// Operator: tensor op class +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Data type of A operand + typename ElementA_, + /// Data type of B operand + typename ElementB_, + /// Data type of accumulator + typename ElementC_, + /// Layout of accumulator + typename LayoutC_, + /// Number of stages + int Stages, + /// Operation performed by MMA + typename Operator_, + /// Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB> +struct DefaultSparseMmaCore { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using ElementA = ElementA_; + using LayoutA = layout::RowMajor; + using ElementB = ElementB_; + using LayoutB = layout::RowMajor; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + static int const kStages = Stages; + static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; + + static int const kSparse = 2; + + /// Number of warps present + using WarpCount = GemmShape; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Size of a threadblock-scoped access + static int const kAccessSizeInBits = 128; + + /// Default Operator + using Operator = Operator_; + + // Warp thread arrangement + static int const kWarpThreadArrangementContiguousA = + Shape::kK / kSparse / (kAccessSizeInBits / sizeof_bits::value); + + static int const kWarpThreadArrangementStridedA = + kWarpSize / kWarpThreadArrangementContiguousA; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise< + sizeof_bits::value, Shape::kK / kSparse>; + + // Shared memory layout + using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous< + sizeof_bits::value, int(128 / sizeof(ElementB))>; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape, + kAccessSizeInBits / sizeof_bits::value>; + + /// 
Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementA, SmemLayoutA, 0, + IteratorThreadMapA>; + + /// ThreadMap of iterator B + using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape<8, 4>, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementB, SmemLayoutB, 0, + IteratorThreadMapB>; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level tensor op + using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp< + WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, + ElementC, LayoutC, Operator, WarpCount::kK>::Type; + + /// Cache operation of operand E + static cutlass::arch::CacheOperation::Kind const kCacheOpE = + cutlass::arch::CacheOperation::Global; + + static int const kInterleavedE = MmaTensorOp::kInterleaved; + static int const kMetaSizeInBits = MmaTensorOp::kMetaSizeInBits; + static int const kMaxID2 = MmaTensorOp::kMaxID2; + static int const kElementsPerElementE = MmaTensorOp::kElementsPerElementE; + + using ElementE = typename MmaTensorOp::ElementE; + using GmemLayoutE = cutlass::layout::ColumnMajorInterleaved; + + // Shared memory layout. Interleaved layout is mapped to PitchLinear layout. + using SmemLayoutE = typename MmaTensorOp::LayoutE; + + /// ThreadMap of iterator E + static int const kElementsPerAccessE = + kAccessSizeInBits / sizeof_bits::value; + + /// E is tiny. Not all warps are needed. + static int const kThreadsE = + (Shape::kM * Shape::kK / kSparse / kElementsPerElementE / + (kAccessSizeInBits / sizeof_bits::value) > + kThreads) + ? kThreads + : (Shape::kM * Shape::kK / kSparse / kElementsPerElementE / + (kAccessSizeInBits / sizeof_bits::value)); + + using IteratorThreadMapE = transform::PitchLinearStripminedThreadMap< + layout::PitchLinearShape, + kThreadsE, kElementsPerAccessE>; + + /// Shared memory iterator to E operand + using SmemIteratorE = transform::threadblock::RegularTileAccessIterator< + MatrixShape, + ElementE, SmemLayoutE, 0, IteratorThreadMapE>; + + /// Policy used to define MmaPipelined + using MmaPolicy = + SparseMmaPolicy, MatrixShape<0, 0>, + MatrixShape<0, 0>, WarpCount::kK>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_core_with_access_size.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_core_with_access_size.h new file mode 100644 index 0000000000000000000000000000000000000000..91fa4495dbd0a09de6a926693160dcce7f603347 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_core_with_access_size.h @@ -0,0 +1,328 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Defines basic properties needed by CTA-level GEMMs assuming expectations about data + layout of the global memory fragments, data types, and internal tile sizes. + + Partial specializations for threadblock::Mma operations targeting simt instructions. +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/array.h" + +#include "cutlass/numeric_types.h" +#include "cutlass/matrix_shape.h" + +#include "cutlass/gemm/warp/mma.h" +#include "cutlass/gemm/threadblock/mma_pipelined.h" +#include "cutlass/gemm/threadblock/mma_singlestage.h" +#include "cutlass/arch/cache_operation.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace threadblock { + +template < + /// Shape of threadblock-scoped matrix multiply operator + typename Shape, + /// Shape of warp-level matrix multiply operator + typename WarpShape, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape, + /// Element data type of A operand + typename ElementA, + /// Layout of operand A + typename LayoutA, + /// Element data type of B operand + typename ElementB, + /// Layout of operand B + typename LayoutB, + /// Data type of accumulator + typename ElementC, + /// Layout of accumulator + typename LayoutC, + /// Indicates type of math operator (arch::OpClassSimt or arch::OpClassTensorOp) + typename OperatorClass, + /// Size of a threadblock-scoped access + int kAccessSizeInBits = -1, // -1 denoting the default + /// Number of stages + int Stages = 2, + /// Operation performed by MMA + typename Operator = typename platform::conditional< + (platform::is_same::value) && + (platform::is_same::value || + platform::is_same::value || + platform::is_same::value || + platform::is_same::value), + cutlass::arch::OpMultiplyAddSaturate, + cutlass::arch::OpMultiplyAdd>::type, + /// Store the accumulators in row major or column major. Row major is used + /// when output layout is interleaved. 
+ bool AccumulatorsInRowMajor = false, + /// Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA = + cutlass::arch::CacheOperation::Global, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB = + cutlass::arch::CacheOperation::Global, + /// per-element transformation for elements of A + ComplexTransform TransformA = ComplexTransform::kNone, + /// per-element transformation for elements of B + ComplexTransform TransformB = ComplexTransform::kNone, + bool IsComplex = false // (is_complex::value || is_complex::value) +> +struct DefaultMmaCoreWithAccessSize; + +template < + /// Shape of threadblock-scoped matrix multiply operator + typename Shape, + /// Shape of warp-level matrix multiply operator + typename WarpShape, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape, + /// Element data type of A operand + typename ElementA, + /// Layout of operand A + typename LayoutA, + /// Element data type of B operand + typename ElementB, + /// Layout of operand B + typename LayoutB, + /// Data type of accumulator + typename ElementC, + /// Layout of accumulator + typename LayoutC, + /// Indicates type of math operator (arch::OpClassSimt or arch::OpClassTensorOp) + typename OperatorClass, + /// Number of stages + int Stages, + /// Operation performed by MMA + typename Operator, + /// Store the accumulators in row major or column major. Row major is used + /// when output layout is interleaved. + bool AccumulatorsInRowMajor, + /// Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB, + /// per-element transformation for elements of A + ComplexTransform TransformA, + /// per-element transformation for elements of B + ComplexTransform TransformB, + bool IsComplex +> +struct DefaultMmaCoreWithAccessSize< + Shape, WarpShape, InstructionShape, + ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, + OperatorClass, -1, Stages, Operator, AccumulatorsInRowMajor, + CacheOpA, CacheOpB, TransformA, TransformB, IsComplex +> : DefaultMmaCore< + Shape, WarpShape, InstructionShape, + ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, + OperatorClass, Stages, Operator, AccumulatorsInRowMajor, + CacheOpA, CacheOpB, TransformA, TransformB, IsComplex +> {}; + + +///////////////////////////////////////////////////////////////////////////////////////////////// + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization: +/// +/// A: column-major +/// B: row-major +/// Operator: simt class +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Data type of A operand + typename ElementA_, + /// Data type of B operand + typename ElementB_, + /// Data type of accumulator + typename ElementC_, + /// Layout of accumulator + typename LayoutC_, + /// Size of a threadblock-scoped access (a value of -1 indicates the default) + int kAccessSizeInBits_, + /// Operation performed by GEMM + typename Operator_> +struct DefaultMmaCoreWithAccessSize>::type, ElementA_, + layout::ColumnMajor, ElementB_, layout::RowMajor, + ElementC_, LayoutC_, arch::OpClassSimt, kAccessSizeInBits_, 2, Operator_ + > { + using Shape = Shape_; + using 
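// kAccessSizeInBits = -1 is a sentinel meaning "use the access size chosen by
// DefaultMmaCore"; the specialization above simply inherits from DefaultMmaCore
// in that case. A non-default value overrides kElementsPerAccess for the
// shared-memory store iterators, and the static_assert further below only permits
// it when A and B have the same element width (or when it degenerates to one
// element per access). For illustration, hypothetical 32-bit ElementA/ElementB
// with kAccessSizeInBits_ = 64 would give kElementsPerAccess = 64 / 32 = 2.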
WarpShape = WarpShape_; + using InstructionShape = GemmShape<1, 1, 1>; + using ElementA = ElementA_; + using LayoutA = layout::ColumnMajor; + using ElementB = ElementB_; + using LayoutB = layout::RowMajor; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + using OperatorClass = arch::OpClassSimt; + static int const PartitionsK = Shape::kK / WarpShape::kK; + + /// Default Operator + using Operator = Operator_; + + /// Number of warps present + using WarpCount = GemmShape< + Shape::kM / WarpShape::kM, + Shape::kN / WarpShape::kN, + PartitionsK + >; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && + !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size." + ); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + static int const kElementsPerAccessDefault = 1; + static_assert(kAccessSizeInBits_ == -1 || + sizeof_bits::value == sizeof_bits::value || + kAccessSizeInBits_ / sizeof_bits::value == kElementsPerAccessDefault, + "Non-default value for kAccessSizeInBits_ is only allowed if size(elementA) == sizeof(elementB)"); + static int const kElementsPerAccess = (kAccessSizeInBits_ != -1) ? kAccessSizeInBits_ / sizeof_bits::value : kElementsPerAccessDefault; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::ColumnMajor; + using SmemLayoutB = layout::RowMajor; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap< + layout::PitchLinearShape, + kThreads, + kElementsPerAccess + >; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileIterator< + MatrixShape, + ElementA, + SmemLayoutA, + 1, + IteratorThreadMapA + >; + + /// Policy of iterator B + using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap< + layout::PitchLinearShape, + kThreads, + kElementsPerAccess + >; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileIterator< + MatrixShape, + ElementB, + SmemLayoutB, + 0, + IteratorThreadMapB + >; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level op + static const int WarpNumThreadsM = detail::simt_get_warp_threads_m(); + static const int WarpNumThreadsN = kWarpSize / WarpNumThreadsM; + static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM; + static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN; + static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN), + "WarpShape must be divisible by ThreadTile shape."); + static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 
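// Worked lane-tiling example (hypothetical warp shape and element widths): with
// WarpShape = <64, 64>, WarpNumThreadsM = 8 and WarpNumThreadsN = 32 / 8 = 4,
// each thread owns ThreadTileM = 64 / 8 = 8 by ThreadTileN = 64 / 4 = 16 elements;
// both exceed 4, so LaneLayout = 2. With 32-bit operands, numElementsA =
// numElementsB = 128 / 32 = 4, giving LaneM = LaneN = 4 and LaneMmaShape = <4, 4, 1>.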
2 : 1; + static const int numElementsA = 128 / sizeof_bits::value; + static const int numElementsB = 128 / sizeof_bits::value; + static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM); + static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN); + // these should have max of thread tile also + using LaneMmaShape = cutlass::gemm::GemmShape< + LaneM, + LaneN, + 1>; + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape, // WarpShape + cutlass::layout::RowMajorInterleaved, // LaneLayout + LaneMmaShape + >; + + using MmaWarpSimt = cutlass::gemm::warp::MmaSimt< + WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8 + ElementA, /// Data type of A elements + SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout) + ElementB, /// Data type of B elements + SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout) + ElementC, /// Element type of C matrix + LayoutC, /// Layout of C matrix (concept: MatrixLayout) + Policy /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy) + >; /// Used for partial specialization + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy< + MmaWarpSimt, + MatrixShape<0, 0>, + MatrixShape<0, 0>, + WarpCount::kK + >; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// +} // namespace threadblock +} // namespace gemm +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_core_with_reduction.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_core_with_reduction.h new file mode 100644 index 0000000000000000000000000000000000000000..d1507914d5860ceee28198f78ce24b313607dcab --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_core_with_reduction.h @@ -0,0 +1,167 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief Defines basic properties needed by CTA-level GEMMs assuming + expectations about data layout of the global memory fragments, data types, + and internal tile sizes. + + Partial specializations for threadblock::Mma operations targeting TensorOp + instructions. +*/ + +#pragma once + +#include "cutlass/array.h" +#include "cutlass/cutlass.h" + +#include "cutlass/layout/tensor_op_multiplicand_sm75.h" +#include "cutlass/layout/tensor_op_multiplicand_sm80.h" + +#include "cutlass/gemm/warp/default_mma_with_reduction_tensor_op.h" +#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h" + +#include "cutlass/gemm/threadblock/default_mma_core.h" + +#include "cutlass/matrix_shape.h" +#include "cutlass/numeric_types.h" +#include "cutlass/transform/pitch_linear_thread_map.h" +#include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op.h" +#include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op_sm80.h" +#include "cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear.h" +#include "cutlass/gemm/threadblock/mma_with_reduction_multistage.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +/// Template defininng default matrix multiply operators inferred from threadblock tile size, +/// global memory data layout, and target math instruction. +template < + /// Shape of threadblock-scoped matrix multiply operator + typename Shape_, + /// Shape of warp-level matrix multiply operator + typename WarpShape, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape, + /// Element data type of A operand + typename ElementA, + /// Layout of operand A + typename LayoutA, + /// Element data type of B operand + typename ElementB, + /// Layout of operand B + typename LayoutB, + /// Data type of accumulator + typename ElementC, + /// Layout of accumulator + typename LayoutC, + /// Indicates type of math operator (arch::OpClassSimt or arch::OpClassTensorOp) + typename OperatorClass, + /// Reduce operand A or B along K dimension + bool ReduceKForA_, + /// Number of stages + int Stages = 2, + /// Operation performed by MMA + typename Operator = typename platform::conditional< + (platform::is_same::value) && + (platform::is_same::value || + platform::is_same::value || + platform::is_same::value || + platform::is_same::value), + cutlass::arch::OpMultiplyAddSaturate, + cutlass::arch::OpMultiplyAdd>::type, + /// Store the accumulators in row major or column major. Row major is used + /// when output layout is interleaved. 
+ bool AccumulatorsInRowMajor = false, + /// Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA = + cutlass::arch::CacheOperation::Global, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB = + cutlass::arch::CacheOperation::Global, + /// per-element transformation for elements of A + ComplexTransform TransformA = ComplexTransform::kNone, + /// per-element transformation for elements of B + ComplexTransform TransformB = ComplexTransform::kNone, + bool IsComplex = false// (is_complex::value || is_complex::value) +> +struct DefaultMmaWithReductionCore { + using Base = DefaultMmaCore; + using Shape = Shape_; + using IteratorThreadMapA = typename Base::IteratorThreadMapA; + using IteratorThreadMapB = typename Base::IteratorThreadMapB; + using SmemIteratorA = typename Base::SmemIteratorA; + using SmemIteratorB = typename Base::SmemIteratorB; + using SmemLayoutA = typename Base::SmemLayoutA; + using SmemLayoutB = typename Base::SmemLayoutB; + using WarpCount = typename Base::WarpCount; + + static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; + + // Define the warp-level tensor op + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaWithReductionTensorOp< + WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, + ElementC, LayoutC, Operator, ReduceKForA_, WarpCount::kK>::Type; + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy, + MatrixShape<0, 0>, WarpCount::kK>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_core_wmma.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_core_wmma.h new file mode 100644 index 0000000000000000000000000000000000000000..f4d0a230eaa04aebc1aab5083bf13f4f4a2ee66c --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_core_wmma.h @@ -0,0 +1,712 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Defines basic properties needed by CTA-level GEMMs assuming expectations about data + layout of the global memory fragments, data types, and internal tile sizes. + + Partial specializations for threadblock::Mma operations targeting TensorOp instructions. +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/array.h" +#include "cutlass/fast_math.h" +#include "cutlass/arch/wmma.h" + +#if defined(CUTLASS_ARCH_WMMA_ENABLED) + +#include "cutlass/numeric_types.h" +#include "cutlass/matrix_shape.h" + +#include "cutlass/transform/threadblock/regular_tile_iterator_pitch_linear.h" + +#include "cutlass/gemm/warp/mma_tensor_op_wmma.h" + +#include "cutlass/gemm/warp/mma_tensor_op_policy.h" +#include "cutlass/gemm/threadblock/default_mma_core.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization: +/// +/// A: column-major +/// B: row-major +/// Operator: wmma tensor op class +/// +/// This uses the default warp-level operator given tile sizes +template < + ///< Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Data type of A operand + typename ElementA_, + /// Data type of B operand + typename ElementB_, + /// Data type of accumulator + typename ElementC_, + /// Layout of accumulator + typename LayoutC_, + /// Operation performed by GEMM + typename Operator_, + /// Number of stages + int Stages> +struct DefaultMmaCore { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using ElementA = ElementA_; + using LayoutA = layout::ColumnMajor; + using ElementB = ElementB_; + using LayoutB = layout::RowMajor; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + using OperatorClass = arch::OpClassWmmaTensorOp; + + /// Number of warps present + using WarpCount = GemmShape< + Shape::kM / WarpShape::kM, + Shape::kN / WarpShape::kN, + Shape::kK / WarpShape::kK + >; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && + !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size." 
+ ); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Size of a threadblock-scoped access + static int const kAccessSizeInBits = 128; + + /// Default Operator + using Operator = Operator_; + + // + // Shared memory layouts + // + // NOTE: shared memory layout for wmma is same as the operands' layout in the global memory + using SmemLayoutA = LayoutA; + using SmemLayoutB = LayoutB; + + // Pad shared memory to avoid bank conflicts + static int const kPaddingA = 128 / sizeof_bits::value; + static int const kPaddingB = 128 / sizeof_bits::value; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap< + layout::PitchLinearShape, + kThreads, + kAccessSizeInBits / sizeof_bits::value + >; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileIterator< + MatrixShape, + ElementA, + SmemLayoutA, + 1, + IteratorThreadMapA + >; + + /// ThreadMap of iterator B + using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap< + layout::PitchLinearShape, + kThreads, + kAccessSizeInBits / sizeof_bits::value + >; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileIterator< + MatrixShape, + ElementB, + SmemLayoutB, + 0, + IteratorThreadMapB + >; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level tensor op + using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< + cutlass::arch::Wmma< + InstructionShape, + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + Operator + >, + cutlass::MatrixShape<1, 1> + >; + + using MmaTensorOp = cutlass::gemm::warp::MmaTensorOpWmma< + WarpShape, + ElementA, + SmemLayoutA, + ElementB, + SmemLayoutB, + ElementC, + LayoutC, + Policy + >; + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy< + MmaTensorOp, + MatrixShape, + MatrixShape<0, kPaddingB>, + WarpCount::kK + >; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization: +/// +/// A: row-major +/// B: column-major +/// Operator: wmma tensorop class +/// +/// This uses the default warp-level operator given tile sizes +template < + ///< Shape of threadblock-scoped matrix multiply operator + ///< (concept:GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) [allowed + /// wmma instruction shapes, e.g., 16x16x16, 32x8x16, 8x32x16,...] 
+ typename InstructionShape_, + /// Data type of A operand + typename ElementA_, + /// Data type of B operand + typename ElementB_, + /// Data type of accumulator + typename ElementC_, + /// Layout of accumulator + typename LayoutC_, + /// Operation performed by GEMM + typename Operator_, + /// Number of stages + int Stages> +struct DefaultMmaCore { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using ElementA = ElementA_; + using LayoutA = layout::RowMajor; + using ElementB = ElementB_; + using LayoutB = layout::ColumnMajor; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + using OperatorClass = arch::OpClassWmmaTensorOp; + + /// Number of warps present + using WarpCount = GemmShape< + Shape::kM / WarpShape::kM, + Shape::kN / WarpShape::kN, + Shape::kK / WarpShape::kK + >; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && + !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size." + ); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads per threadblock + static int const kThreads = WarpCount::kCount * kWarpSize; + + + /// Size of a threadblock-scoped access + static int const kAccessSizeInBits = 128; + + /// Default Operator + using Operator = Operator_; + + // Warp thread arrangement + static int const kWarpThreadArrangementContiguousA = + Shape::kK / (kAccessSizeInBits / sizeof_bits::value); + + static int const kWarpThreadArrangementStridedA = + kWarpSize / kWarpThreadArrangementContiguousA; + + static int const kWarpThreadArrangementContiguousB = + Shape::kK / (kAccessSizeInBits / sizeof_bits::value); + + static int const kWarpThreadArrangementStridedB = + kWarpSize / kWarpThreadArrangementContiguousB; + + // + // Shared memory layouts + // + + // shared memory layout for wmma is same as the operands' layout in global memory + using SmemLayoutA = LayoutA; + using SmemLayoutB = LayoutB; + + // Pad shared memory to avoid bank conflicts + static int const kPaddingA = 128 / sizeof_bits::value; + static int const kPaddingB = 128 / sizeof_bits::value; + + // + // Iterators to write to shared memory + // + using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap< + layout::PitchLinearShape, + kThreads, + kAccessSizeInBits / sizeof_bits::value + >; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileIterator< + MatrixShape, + ElementA, + SmemLayoutA, + 1, + IteratorThreadMapA + >; + + /// ThreadMap of iterator B + using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap< + layout::PitchLinearShape, + kThreads, + kAccessSizeInBits / sizeof_bits::value + >; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileIterator< + MatrixShape, + ElementB, + SmemLayoutB, + 0, + IteratorThreadMapB // SmemThreadMapB + >; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level tensor op + using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< + cutlass::arch::Wmma< + InstructionShape, + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + Operator + >, + cutlass::MatrixShape<1, 1> + >; + + using MmaTensorOp = cutlass::gemm::warp::MmaTensorOpWmma< + WarpShape, + ElementA, + SmemLayoutA, + ElementB, + SmemLayoutB, + ElementC, + LayoutC, + Policy + >; + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy< + MmaTensorOp, + 
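// Because the WMMA path keeps A and B in their global-memory layouts in shared
// memory, bank conflicts are avoided by padding each tile: kPadding is one
// 128-bit line of elements, e.g. 128 / 16 = 8 elements (16 bytes) for a
// hypothetical 16-bit operand. Those paddings are the non-zero MatrixShape
// arguments passed to MmaPolicy.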
MatrixShape<0, kPaddingA>, + MatrixShape, + WarpCount::kK + >; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization: +/// +/// A: row-major +/// B: row-major +/// Operator: tensor op class +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Data type of A operand + typename ElementA_, + /// Data type of B operand + typename ElementB_, + /// Data type of accumulator + typename ElementC_, + /// Layout of accumulator + typename LayoutC_, + /// Operation performed by MMA + typename Operator_, + /// Number of stages + int Stages> +struct DefaultMmaCore { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using ElementA = ElementA_; + using LayoutA = layout::RowMajor; + using ElementB = ElementB_; + using LayoutB = layout::RowMajor; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + using OperatorClass = arch::OpClassWmmaTensorOp; + + /// Number of warps present + using WarpCount = GemmShape< + Shape::kM / WarpShape::kM, + Shape::kN / WarpShape::kN, + Shape::kK / WarpShape::kK + >; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && + !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size." + ); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Size of a threadblock-scoped access + static int const kAccessSizeInBits = 128; + + /// Default Operator + using Operator = Operator_; + + // Warp thread arrangement + static int const kWarpThreadArrangementContiguousA = + Shape::kK / (kAccessSizeInBits / sizeof_bits::value); + + static int const kWarpThreadArrangementStridedA = + kWarpSize / kWarpThreadArrangementContiguousA; + + // + // Shared memory layouts + // + + // shared memory layout for wmma is same as the operands' layout in global memory + using SmemLayoutA = LayoutA; + using SmemLayoutB = LayoutB; + + // Pad shared memory to avoid bank conflicts + static int const kPaddingA = 128 / sizeof_bits::value; + static int const kPaddingB = 128 / sizeof_bits::value; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap< + layout::PitchLinearShape, + kThreads, + kAccessSizeInBits / sizeof_bits::value + >; + + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileIterator< + MatrixShape, + ElementA, + SmemLayoutA, + 1, + IteratorThreadMapA + >; + + /// ThreadMap of iterator B + using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap< + layout::PitchLinearShape, + kThreads, + kAccessSizeInBits / sizeof_bits::value + >; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileIterator< + MatrixShape, + ElementB, + SmemLayoutB, + 0, + IteratorThreadMapB + >; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level 
tensor op + using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< + cutlass::arch::Wmma< + InstructionShape, + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + Operator + >, + cutlass::MatrixShape<1, 1> + >; + + using MmaTensorOp = cutlass::gemm::warp::MmaTensorOpWmma< + WarpShape, + ElementA, + SmemLayoutA, + ElementB, + SmemLayoutB, + ElementC, + LayoutC, + Policy + >; + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy< + MmaTensorOp, + MatrixShape<0, kPaddingA>, + MatrixShape<0, kPaddingB>, + WarpCount::kK + >; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization: +/// +/// A: column-major +/// B: column-major +/// Operator: tensor op class +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Data type of A operand + typename ElementA_, + /// Data type of B operand + typename ElementB_, + /// Data type of accumulator + typename ElementC_, + /// Layout of accumulator + typename LayoutC_, + /// Operation performed by MMA + typename Operator_, + /// Number of stages + int Stages> +struct DefaultMmaCore { + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using ElementA = ElementA_; + using LayoutA = layout::ColumnMajor; + using ElementB = ElementB_; + using LayoutB = layout::ColumnMajor; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + using OperatorClass = arch::OpClassWmmaTensorOp; + + /// Number of warps present + using WarpCount = + GemmShape; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Size of a threadblock-scoped access + static int const kAccessSizeInBits = 128; + + /// Default Operator + using Operator = Operator_; + + // Warp thread arrangement + static int const kWarpThreadArrangementContiguousB = + Shape::kK / (kAccessSizeInBits / sizeof_bits::value); + + static int const kWarpThreadArrangementStridedB = + kWarpSize / kWarpThreadArrangementContiguousB; + + // + // Shared memory layouts + // + + // shared memory layout for wmma is same as the operands' layout in global memory + using SmemLayoutA = LayoutA; + using SmemLayoutB = LayoutB; + + // Pad shared memory to avoid bank conflicts + static int const kPaddingA = 128 / sizeof_bits::value; + static int const kPaddingB = 128 / sizeof_bits::value; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap< + layout::PitchLinearShape, + kThreads, + kAccessSizeInBits / sizeof_bits::value + >; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileIterator< + MatrixShape, ElementA, SmemLayoutA, 1, + IteratorThreadMapA>; + + /// ThreadMap of iterator B + using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap< + layout::PitchLinearShape, + kThreads, + 
kAccessSizeInBits / sizeof_bits::value + >; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileIterator< + MatrixShape, ElementB, SmemLayoutB, 0, + IteratorThreadMapB>; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level tensor op + using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< + cutlass::arch::Wmma< + InstructionShape, + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + Operator + >, + cutlass::MatrixShape<1, 1> + >; + + using MmaTensorOp = cutlass::gemm::warp::MmaTensorOpWmma< + WarpShape, + ElementA, + SmemLayoutA, + ElementB, + SmemLayoutB, + ElementC, + LayoutC, + Policy + >; + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy< + MmaTensorOp, + MatrixShape, + MatrixShape, + WarpCount::kK + >; +}; + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass + +#endif // defined(CUTLASS_ARCH_WMMA_ENABLED) + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_layernorm_mainloop_fusion.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_layernorm_mainloop_fusion.h new file mode 100644 index 0000000000000000000000000000000000000000..b05c63498bbeac11b768ecbd4b8fbc303d8f621d --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_layernorm_mainloop_fusion.h @@ -0,0 +1,178 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Template for a pipelined GEMM kernel. Does not compute batching or support split-K. 
+*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/numeric_types.h" +#include "cutlass/arch/arch.h" + +#include "cutlass/layout/matrix.h" +#include "cutlass/gemm/threadblock/default_mma_core.h" +#include "cutlass/gemm/threadblock/mma_layernorm_mainloop_fusion_multistage.h" +#include "cutlass/transform/threadblock/predicated_scale_bias_vector_iterator.h" +#include "cutlass/transform/threadblock/predicated_scale_bias_vector_access_iterator.h" +#include "cutlass/transform/threadblock/regular_scale_bias_vector_access_iterator.h" +#include "cutlass/gemm/warp/scale_bias_tile_iterator.h" +#include "cutlass/transform/threadblock/predicated_tile_iterator.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for Scale/Bias vectors + typename ElementScaleBias, + /// Layout type for Scale/Bias vectors + typename LayoutScaleBias, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Layout type for C and D matrix operands + typename LayoutC, + /// Operator class tag + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Operation perfomed by GEMM + typename Operator, + /// Store the accumulators in row major or column major. Row major is used + /// when output layout is interleaved. + bool AccumulatorsInRowMajor = false, + /// Use zfill or predicate for SM80 out-of-bound cp.async + SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone + > +struct DefaultMmaLayernormMainloopFusion { + + static cutlass::arch::CacheOperation::Kind const CacheOpA = + ((sizeof_bits::value * kAlignmentA) == 128) + ? cutlass::arch::CacheOperation::Global + : cutlass::arch::CacheOperation::Always; + + static cutlass::arch::CacheOperation::Kind const CacheOpB = + ((sizeof_bits::value * kAlignmentB) == 128) + ? 
cutlass::arch::CacheOperation::Global + : cutlass::arch::CacheOperation::Always; + + static cutlass::arch::CacheOperation::Kind const CacheOpGammaBeta = CacheOpA; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, + Stages, Operator, false, CacheOpA, CacheOpB>; + + // Define iterators over tiles from the A operand + using ThreadMapA = typename MmaCore::IteratorThreadMapA; + using AccessTypeA = cutlass::Array<ElementA, kAlignmentA>; + using IteratorA = + cutlass::transform::threadblock::PredicatedTileAccessIterator< + cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, + ElementA, LayoutA, 1, ThreadMapA, AccessTypeA>; + + // Define iterators over tiles from the B operand + using ThreadMapB = typename MmaCore::IteratorThreadMapB; + using AccessTypeB = cutlass::Array<ElementB, kAlignmentB>; + using IteratorB = + cutlass::transform::threadblock::PredicatedTileAccessIterator< + cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, + ElementB, LayoutB, 0, ThreadMapB, AccessTypeB>; + + /// Define iterators over tiles from scale/bias vectors + using IteratorVarMean = + cutlass::transform::threadblock::PredicatedScaleBiasVectorIterator< + cutlass::MatrixShape<1, WarpShape::kN>, + ElementScaleBias, + LayoutScaleBias>; + + /// Define iterators over tiles from scale/bias vectors + using IteratorGammaBeta = + cutlass::transform::threadblock::PredicatedScaleBiasVectorAccessIterator< + cutlass::MatrixShape<1, ThreadblockShape::kK>, ElementScaleBias, + LayoutScaleBias>; + + using SmemIteratorGammaBeta = + cutlass::transform::threadblock::RegularScaleBiasVectorAccessIterator< + cutlass::MatrixShape<1, ThreadblockShape::kK>, ElementScaleBias, + LayoutScaleBias>; + + static int const kThreadCount = 32; + + // Warp-level iterators to load scale and bias vectors + using WarpIteratorGammaBeta = cutlass::gemm::warp::ScaleBiasTileIterator< + MatrixShape<WarpShape::kM, WarpShape::kK>, ElementScaleBias, + LayoutScaleBias, MatrixShape<InstructionShape::kM, InstructionShape::kK>, + typename MmaCore::MmaTensorOp::IteratorA::Base::Policy, kThreadCount, + MmaCore::WarpCount::kK>; + + // Define the threadblock-scoped multistage matrix multiply + using ThreadblockMma = cutlass::gemm::threadblock::MmaLayernormMainloopFusionMultistage< + typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, + MmaCore::kCacheOpA, IteratorB, typename MmaCore::SmemIteratorB, + MmaCore::kCacheOpB, IteratorVarMean, IteratorGammaBeta, SmemIteratorGammaBeta, + CacheOpGammaBeta, + ElementAccumulator, layout::RowMajor, + typename MmaCore::MmaPolicy, WarpIteratorGammaBeta, Stages, SharedMemoryClear>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_planar_complex_multistage.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_planar_complex_multistage.h new file mode 100644 index 0000000000000000000000000000000000000000..6915b2010d01c1c95f763dbdd837f890fa518ae4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_planar_complex_multistage.h @@ -0,0 +1,136 @@ +/*************************************************************************************************** + * Copyright (c) 
2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief Template for a multistage GEMM kernel. Does not compute batching or support split-K. 
+*/ + +#pragma once + +#include "cutlass/arch/arch.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm80.h" +#include "cutlass/gemm/threadblock/default_mma.h" +#include "cutlass/gemm/threadblock/mma_planar_complex_multistage.h" + +#include "cutlass/numeric_types.h" +#include "cutlass/transform/threadblock/predicated_tile_iterator.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +template < + /// Element type for A matrix operand + typename ElementA_, + /// Layout type for A matrix operand + typename LayoutA_, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB_, + /// Layout type for B matrix operand + typename LayoutB_, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for internal accumulation + typename ElementAccumulator_, + /// Layout type for C and D matrix operands + typename LayoutC_, + /// Operator class tag + typename OperatorClass_, + /// Tag indicating architecture to tune for + typename ArchTag_, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape_, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape_, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape_, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Complex transformation on operand A + ComplexTransform TransformA = ComplexTransform::kNone, + /// Complex transformation on operand B + ComplexTransform TransformB = ComplexTransform::kNone, + /// Math operator tag (e.g. 
arch::OpMultiplyAdd) + typename Operator = arch::OpMultiplyAdd +> +struct DefaultMmaPlanarComplexMultistage { + + // Construct a planar complex variant from the real-valued variant + using RealMmaMultistage = typename DefaultMma< + ElementA_, + LayoutA_, + kAlignmentA, + ElementB_, + LayoutB_, + kAlignmentB, + ElementAccumulator_, + LayoutC_, + OperatorClass_, + ArchTag_, + ThreadblockShape_, + WarpShape_, + InstructionShape_, + Stages, + Operator + >::ThreadblockMma; + + using ThreadblockMma = MmaPlanarComplexMultistage< + ThreadblockShape_, + typename RealMmaMultistage::IteratorA, + typename RealMmaMultistage::SmemIteratorA, + cutlass::arch::CacheOperation::Global, + typename RealMmaMultistage::IteratorB, + typename RealMmaMultistage::SmemIteratorB, + cutlass::arch::CacheOperation::Global, + ElementAccumulator_, + LayoutC_, + typename RealMmaMultistage::Policy, + Stages, + TransformA, + TransformB + >; +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass + + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_planar_complex_pipelined.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_planar_complex_pipelined.h new file mode 100644 index 0000000000000000000000000000000000000000..a7ae5a446d3f1251ee520a4ecd432b4043ed1e88 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_planar_complex_pipelined.h @@ -0,0 +1,130 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/*! \file + \brief +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/transform/threadblock/predicated_tile_iterator.h" + +#include "cutlass/gemm/warp/mma_planar_complex.h" +#include "cutlass/gemm/threadblock/default_mma.h" +#include "cutlass/gemm/threadblock/mma_planar_complex_pipelined.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +template < + /// Element type for A matrix operand + typename ElementA_, + /// Layout type for A matrix operand + typename LayoutA_, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB_, + /// Layout type for B matrix operand + typename LayoutB_, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for internal accumulation + typename ElementAccumulator_, + /// Layout type for C and D matrix operands + typename LayoutC_, + /// Operator class tag + typename OperatorClass_, + /// Tag indicating architecture to tune for + typename ArchTag_, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape_, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape_, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape_, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Complex transformation on operand A + ComplexTransform TransformA = ComplexTransform::kNone, + /// Complex transformation on operand B + ComplexTransform TransformB = ComplexTransform::kNone, + /// Math operator tag (e.g. 
arch::OpMultiplyAdd) + typename Operator = arch::OpMultiplyAdd +> +struct DefaultMmaPlanarComplexPipelined { + + // Construct a planar complex variant from the real-valued variant + using RealMma = typename DefaultMma< + ElementA_, + LayoutA_, + kAlignmentA, + ElementB_, + LayoutB_, + kAlignmentB, + ElementAccumulator_, + LayoutC_, + OperatorClass_, + ArchTag_, + ThreadblockShape_, + WarpShape_, + InstructionShape_, + Stages, + Operator + >::ThreadblockMma; + + using ThreadblockMma = MmaPlanarComplexPipelined< + ThreadblockShape_, + typename RealMma::IteratorA, + typename RealMma::SmemIteratorA, + typename RealMma::IteratorB, + typename RealMma::SmemIteratorB, + ElementAccumulator_, + LayoutC_, + typename RealMma::Policy, + Stages, + TransformA, + TransformB + >; +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_softmax_mainloop_fusion.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_softmax_mainloop_fusion.h new file mode 100644 index 0000000000000000000000000000000000000000..e8db4d8eb8cbafa50208eb07fc66332454a69ccd --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_softmax_mainloop_fusion.h @@ -0,0 +1,160 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Template for a pipelined softmax-GEMM kernel. 
+*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/numeric_types.h" +#include "cutlass/arch/arch.h" + +#include "cutlass/layout/matrix.h" +#include "cutlass/gemm/threadblock/default_mma_core.h" +#include "cutlass/gemm/threadblock/mma_softmax_mainloop_fusion_multistage.h" +#include "cutlass/transform/threadblock/predicated_scale_bias_vector_iterator.h" +#include "cutlass/transform/threadblock/predicated_scale_bias_vector_access_iterator.h" +#include "cutlass/transform/threadblock/regular_scale_bias_vector_access_iterator.h" +#include "cutlass/gemm/warp/scale_bias_tile_iterator.h" +#include "cutlass/transform/threadblock/predicated_tile_iterator.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for Scale/Bias vectors + typename ElementScaleBias, + /// Layout type for Scale/Bias vectors + typename LayoutScaleBias, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Layout type for C and D matrix operands + typename LayoutC, + /// Operator class tag + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Whether problem has been transformed. This determines to which operand + /// the softmax is applied. + bool InternalTranspose, + /// Operation perfomed by GEMM + typename Operator, + /// Store the accumulators in row major or column major. Row major is used + /// when output layout is interleaved. + bool AccumulatorsInRowMajor = false, + /// Use zfill or predicate for SM80 out-of-bound cp.async + SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone + > +struct DefaultMmaSoftmaxMainloopFusion { + + static cutlass::arch::CacheOperation::Kind const CacheOpA = + ((sizeof_bits::value * kAlignmentA) == 128) + ? cutlass::arch::CacheOperation::Global + : cutlass::arch::CacheOperation::Always; + + static cutlass::arch::CacheOperation::Kind const CacheOpB = + ((sizeof_bits::value * kAlignmentB) == 128) + ? 
cutlass::arch::CacheOperation::Global + : cutlass::arch::CacheOperation::Always; + + static cutlass::arch::CacheOperation::Kind const CacheOpGammaBeta = CacheOpA; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, + Stages, Operator, false, CacheOpA, CacheOpB>; + + // Define iterators over tiles from the A operand + using ThreadMapA = typename MmaCore::IteratorThreadMapA; + using AccessTypeA = cutlass::Array<ElementA, kAlignmentA>; + using IteratorA = + cutlass::transform::threadblock::PredicatedTileAccessIterator< + cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, + ElementA, LayoutA, 1, ThreadMapA, AccessTypeA>; + + // Define iterators over tiles from the B operand + using ThreadMapB = typename MmaCore::IteratorThreadMapB; + using AccessTypeB = cutlass::Array<ElementB, kAlignmentB>; + using IteratorB = + cutlass::transform::threadblock::PredicatedTileAccessIterator< + cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, + ElementB, LayoutB, 0, ThreadMapB, AccessTypeB>; + + /// Define iterators over tiles from scale/bias vectors + using IteratorNormSum = + cutlass::transform::threadblock::PredicatedScaleBiasVectorIterator< + cutlass::MatrixShape<1, WarpShape::kN>, + ElementScaleBias, + LayoutScaleBias>; + + // Define the threadblock-scoped multistage matrix multiply + using ThreadblockMma = cutlass::gemm::threadblock::MmaSoftmaxMainloopFusionMultistage< + typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, + MmaCore::kCacheOpA, IteratorB, typename MmaCore::SmemIteratorB, + MmaCore::kCacheOpB, IteratorNormSum, + ElementAccumulator, layout::RowMajor, + typename MmaCore::MmaPolicy, Stages, InternalTranspose, SharedMemoryClear>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_with_reduction.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_with_reduction.h new file mode 100644 index 0000000000000000000000000000000000000000..bc6957a064a8e6c8e95ac48755751f9143d9c99d --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_with_reduction.h @@ -0,0 +1,141 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Template for a pipelined GEMM kernel. Does not compute batching or support split-K. +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/numeric_types.h" +#include "cutlass/arch/arch.h" + +#include "cutlass/layout/matrix.h" +#include "cutlass/transform/threadblock/predicated_tile_iterator.h" +#include "cutlass/transform/threadblock/predicated_tile_iterator_2dthreadtile.h" +#include "cutlass/gemm/threadblock/default_mma_core_with_reduction.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Layout type for C and D matrix operands + typename LayoutC, + /// Operator class tag + typename OperatorClass, + /// + bool ReduceKForA_, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Operation perfomed by GEMM + typename Operator, + /// Store the accumulators in row major or column major. Row major is used + /// when output layout is interleaved. + bool AccumulatorsInRowMajor = false, + /// Use zfill or predicate for SM80 out-of-bound cp.async + SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone + > +struct DefaultMmaWithReduction { + + static cutlass::arch::CacheOperation::Kind const CacheOpA = + ((sizeof_bits::value * kAlignmentA) == 128) + ? cutlass::arch::CacheOperation::Global + : cutlass::arch::CacheOperation::Always; + + static cutlass::arch::CacheOperation::Kind const CacheOpB = + ((sizeof_bits::value * kAlignmentB) == 128) + ? 
cutlass::arch::CacheOperation::Global + : cutlass::arch::CacheOperation::Always; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaWithReductionCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, + ReduceKForA_, Stages, Operator, false, CacheOpA, CacheOpB>; + + // Define iterators over tiles from the A operand + using ThreadMapA = typename MmaCore::IteratorThreadMapA; + using AccessTypeA = cutlass::Array<ElementA, kAlignmentA>; + using IteratorA = + cutlass::transform::threadblock::PredicatedTileAccessIterator< + cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, + ElementA, LayoutA, 1, ThreadMapA, AccessTypeA>; + + // Define iterators over tiles from the B operand + using ThreadMapB = typename MmaCore::IteratorThreadMapB; + using AccessTypeB = cutlass::Array<ElementB, kAlignmentB>; + using IteratorB = + cutlass::transform::threadblock::PredicatedTileAccessIterator< + cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, + ElementB, LayoutB, 0, ThreadMapB, AccessTypeB>; + + // Define the threadblock-scoped multistage matrix multiply + using ThreadblockMma = cutlass::gemm::threadblock::MmaWithReductionMultistage< + typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, + MmaCore::kCacheOpA, IteratorB, typename MmaCore::SmemIteratorB, + MmaCore::kCacheOpB, ElementAccumulator, layout::RowMajor, + typename MmaCore::MmaPolicy, Stages, SharedMemoryClear>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_multistage_mma_complex.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_multistage_mma_complex.h new file mode 100644 index 0000000000000000000000000000000000000000..4bd35304abed726225d1f451956fe8a3b8c08be7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_multistage_mma_complex.h @@ -0,0 +1,159 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief Template for a multistage GEMM kernel. Does not compute batching or support split-K. +*/ + +#pragma once + +#include "cutlass/arch/arch.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm80.h" +#include "cutlass/numeric_types.h" +#include "cutlass/transform/threadblock/predicated_tile_iterator.h" +#include "cutlass/gemm/threadblock/default_multistage_mma_complex_core_sm80.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +template < + /// Element type for A matrix operand + typename ElementA_, + /// Layout type for A matrix operand + typename LayoutA_, + /// Element type for B matrix operand + typename ElementB_, + /// Layout type for B matrix operand + typename LayoutB_, + /// Element type for internal accumulation + typename ElementAccumulator_, + /// Layout type for C and D matrix operands + typename LayoutC_, + /// Operator class tag + typename OperatorClass_, + /// Tag indicating architecture to tune for + typename ArchTag_, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape_, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape_, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape_, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Complex transformation on operand A + ComplexTransform TransformA = ComplexTransform::kNone, + /// Complex transformation on operand B + ComplexTransform TransformB = ComplexTransform::kNone, + /// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex) + typename Operator = arch::OpMultiplyAddComplex, + /// Store the accumulators in row major or column major. Row major is used + /// when output layout is interleaved. 
+ bool AccumulatorsInRowMajor = false> +struct DefaultMultistageMmaComplex; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization for row-major output +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Tag indicating architecture to tune for + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape, + /// Number of stages used in the multistage mainloop + int Stages, + /// Complex transformation on operand A + ComplexTransform TransformA, + /// Complex transformation on operand B + ComplexTransform TransformB, + /// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex) + typename Operator> +struct DefaultMultistageMmaComplex { + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplexCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementAccumulator, layout::RowMajor, OperatorClass, + Stages, TransformA, TransformB, Operator>; + + // Define iterators over tiles from the A operand + using ThreadMapA = typename MmaCore::IteratorThreadMapA; + using AccessTypeA = cutlass::Array; + using IteratorA = + cutlass::transform::threadblock::PredicatedTileAccessIterator< + cutlass::MatrixShape, + ElementA, LayoutA, 1, ThreadMapA, AccessTypeA>; + + // Define iterators over tiles from the B operand + using ThreadMapB = typename MmaCore::IteratorThreadMapB; + using AccessTypeB = cutlass::Array; + using IteratorB = + cutlass::transform::threadblock::PredicatedTileAccessIterator< + cutlass::MatrixShape, + ElementB, LayoutB, 0, ThreadMapB, AccessTypeB>; + + // Define the threadblock-scoped multistage matrix multiply + using ThreadblockMma = cutlass::gemm::threadblock::MmaMultistage< + typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, + MmaCore::kCacheOpA, IteratorB, typename MmaCore::SmemIteratorB, + MmaCore::kCacheOpB, ElementAccumulator, layout::RowMajor, + typename MmaCore::MmaPolicy, Stages>; +}; + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_multistage_mma_complex_core.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_multistage_mma_complex_core.h new file mode 100644 index 0000000000000000000000000000000000000000..79b4ec37dc8d15dead8262b865c1ebbd8dc4a712 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_multistage_mma_complex_core.h @@ -0,0 +1,119 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Defines basic properties needed by CTA-level GEMMs assuming + expectations about data layout of the global memory fragments, data types, + and internal tile sizes. + + Partial specializations for threadblock::Mma operations targeting TensorOp + instructions. +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/array.h" +#include "cutlass/complex.h" + +#include "cutlass/layout/tensor_op_multiplicand_sm75.h" +#include "cutlass/layout/tensor_op_multiplicand_sm80.h" + +#include "cutlass/gemm/warp/mma_simt_policy.h" +#include "cutlass/gemm/warp/mma_simt.h" +#include "cutlass/gemm/warp/default_mma_tensor_op.h" +#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h" + +#include "cutlass/gemm/threadblock/default_mma_core.h" + +#include "cutlass/matrix_shape.h" +#include "cutlass/numeric_types.h" +#include "cutlass/transform/pitch_linear_thread_map.h" + +#include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op.h" +#include "cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear.h" +#include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op_sm80.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +/// Template defininng default matrix multiply operators inferred from +/// threadblock tile size, global memory data layout, and target math +/// instruction. 
+template < + /// Shape of threadblock-scoped matrix multiply operator + typename Shape, + /// Shape of warp-level matrix multiply operator + typename WarpShape, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape, + /// Element data type of A operand + typename ElementA, + /// Layout of operand A + typename LayoutA, + /// Element data type of B operand + typename ElementB, + /// Layout of operand B + typename LayoutB, + /// Data type of accumulator + typename ElementC, + /// Layout of accumulator + typename LayoutC, + /// Indicates type of math operator (arch::OpClassSimt or arch::OpClassTensorOp) + typename OperatorClass, + /// Number of stages + int Stages, + /// Complex transformation on operand A + ComplexTransform TransformA, + /// Complex transformation on operand B + ComplexTransform TransformB, + /// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex) + typename Operator = arch::OpMultiplyAddComplex, + /// Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA = + cutlass::arch::CacheOperation::Global, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB = + cutlass::arch::CacheOperation::Global> +struct DefaultMultistageMmaComplexCore; + + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_multistage_mma_complex_core_sm80.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_multistage_mma_complex_core_sm80.h new file mode 100644 index 0000000000000000000000000000000000000000..d660a3e797093398fce36f7d693fbd693172f2bf --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_multistage_mma_complex_core_sm80.h @@ -0,0 +1,1808 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Defines basic properties needed by CTA-level GEMMs assuming + expectations about data layout of the global memory fragments, data types, + and internal tile sizes. + + Partial specializations for threadblock::Mma operations targeting TensorOp + instructions. +*/ + +#pragma once + +#include "cutlass/array.h" +#include "cutlass/cutlass.h" + +#include "cutlass/layout/tensor_op_multiplicand_sm75.h" +#include "cutlass/layout/tensor_op_multiplicand_sm80.h" + +#include "cutlass/gemm/warp/mma_simt_policy.h" +#include "cutlass/gemm/warp/mma_simt.h" +#include "cutlass/gemm/warp/default_mma_complex_tensor_op.h" +#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h" + +#include "cutlass/gemm/threadblock/default_multistage_mma_complex_core.h" + +#include "cutlass/matrix_shape.h" +#include "cutlass/numeric_types.h" +#include "cutlass/transform/pitch_linear_thread_map.h" +#include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op.h" +#include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op_sm80.h" +#include "cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear.h" +#include "cutlass/gemm/threadblock/mma_multistage.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for complex double-precision +/// +/// A: column-major +/// B: row-major +/// Operator: arch::OpMultiplyAddComplex or arch::OpMultiplyGaussianComplex +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Layout of accumulator + typename LayoutC_, + /// Number of stages + int Stages, + /// Complex transformation on operand A + ComplexTransform TransformA, + /// Complex transformation on operand B + ComplexTransform TransformB, + /// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex) + typename Operator_, + /// Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB> +struct DefaultMultistageMmaComplexCore< + Shape_, WarpShape_, InstructionShape_, + complex, layout::ColumnMajor, + complex, layout::RowMajor, + complex, LayoutC_, + arch::OpClassTensorOp, + Stages, + TransformA, TransformB, + Operator_, + CacheOpA, CacheOpB> { + + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using ElementA = complex; + 
using LayoutA = layout::ColumnMajor; + using ElementB = complex; + using LayoutB = layout::RowMajor; + using ElementC = complex; + using LayoutC = LayoutC_; + static int const kStages = Stages; + static ComplexTransform const kTransformA = TransformA; + static ComplexTransform const kTransformB = TransformB; + using Operator = Operator_; + static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; + + /// Number of warps present + using WarpCount = GemmShape; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); + + static_assert(WarpCount::kCount > 1, + "This specialization requires at least two warps."); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Size of a threadblock-scoped 128 + static int const kAccessSizeInBits = 128; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous128b; + + using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous128b; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape<8, 4>, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementA, SmemLayoutA, 1, + IteratorThreadMapA>; + + /// ThreadMap of iterator B + using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape<8, 4>, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementB, SmemLayoutB, 0, + IteratorThreadMapB>; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level tensor op + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + WarpShape, InstructionShape, + ElementA, SmemLayoutA, + ElementB, SmemLayoutB, + ElementC, LayoutC, + kTransformA, kTransformB, + Operator>::Type; + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy, + MatrixShape<0, 0>, WarpCount::kK>; +}; + + +/// Partial specialization for complex double-precision +/// +/// A: column-major +/// B: row-major +/// Operator: arch::OpMultiplyAddComplex or arch::OpMultiplyGaussianComplex +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Layout of accumulator + typename LayoutC_, + /// Number of stages + int Stages, + /// Complex transformation on operand A + ComplexTransform TransformA, + /// Complex transformation on operand B + ComplexTransform TransformB, + /// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex) + typename Operator_, + /// 
Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB> +struct DefaultMultistageMmaComplexCore< + Shape_, WarpShape_, InstructionShape_, + complex, layout::ColumnMajor, + complex, layout::ColumnMajor, + complex, LayoutC_, + arch::OpClassTensorOp, + Stages, + TransformA, TransformB, + Operator_, + CacheOpA, CacheOpB> { + + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using ElementA = complex; + using LayoutA = layout::ColumnMajor; + using ElementB = complex; + using LayoutB = layout::ColumnMajor; + using ElementC = complex; + using LayoutC = LayoutC_; + static int const kStages = Stages; + using Operator = Operator_; + static ComplexTransform const kTransformA = TransformA; + static ComplexTransform const kTransformB = TransformB; + static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; + + /// Number of warps present + using WarpCount = GemmShape; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); + + static_assert(WarpCount::kCount > 1, + "This specialization requires at least two warps."); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Size of a threadblock-scoped 128 + static int const kAccessSizeInBits = 128; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous128b; + using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise128x4; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape<8, 4>, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementA, SmemLayoutA, 1, + IteratorThreadMapA>; + + /// ThreadMap of iterator B + using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape<8, 4>, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementB, SmemLayoutB, 0, + IteratorThreadMapB>; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level tensor op + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + WarpShape, InstructionShape, + ElementA, SmemLayoutA, + ElementB, SmemLayoutB, + ElementC, LayoutC, + kTransformA, kTransformB, + Operator>::Type; + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy, + MatrixShape<0, 0>, WarpCount::kK>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for complex double-precision +/// +/// A: row-major +/// B: column-major +/// Operator: arch::OpMultiplyAddComplex or arch::OpMultiplyGaussianComplex +/// +/// This uses the default warp-level operator given tile sizes +template < + /// 
Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Layout of accumulator + typename LayoutC_, + /// Number of stages + int Stages, + /// Complex transformation on operand A + ComplexTransform TransformA, + /// Complex transformation on operand B + ComplexTransform TransformB, + /// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex) + typename Operator_, + /// Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB> +struct DefaultMultistageMmaComplexCore< + Shape_, WarpShape_, InstructionShape_, + complex, layout::RowMajor, + complex, layout::ColumnMajor, + complex, LayoutC_, + arch::OpClassTensorOp, + Stages, + TransformA, TransformB, + Operator_, + CacheOpA, CacheOpB> { + + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using ElementA = complex; + using LayoutA = layout::RowMajor; + using ElementB = complex; + using LayoutB = layout::ColumnMajor; + using ElementC = complex; + using LayoutC = LayoutC_; + static int const kStages = Stages; + static ComplexTransform const kTransformA = TransformA; + static ComplexTransform const kTransformB = TransformB; + using Operator = Operator_; + static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; + + /// Number of warps present + using WarpCount = GemmShape; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); + + static_assert(WarpCount::kCount > 1, + "This specialization requires at least two warps."); + + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Size of a threadblock-scoped 128 + static int const kAccessSizeInBits = 128; + + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise128x4; + using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise128x4; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape<8, 4>, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementA, SmemLayoutA, 1, + IteratorThreadMapA>; + + /// ThreadMap of iterator B + using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape<8, 4>, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementB, SmemLayoutB, 0, + IteratorThreadMapB>; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level tensor op + using MmaTensorOp = typename 
cutlass::gemm::warp::DefaultMmaComplexTensorOp< + WarpShape, InstructionShape, + ElementA, SmemLayoutA, + ElementB, SmemLayoutB, + ElementC, LayoutC, + kTransformA, kTransformB, + Operator>::Type; + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy, + MatrixShape<0, 0>, WarpCount::kK>; +}; + + +/// Partial specialization for complex double-precision +/// +/// A: row-major +/// B: row-major +/// Operator: arch::OpMultiplyAddComplex or arch::OpMultiplyGaussianComplex +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Layout of accumulator + typename LayoutC_, + /// Number of stages + int Stages, + /// Complex transformation on operand A + ComplexTransform TransformA, + /// Complex transformation on operand B + ComplexTransform TransformB, + /// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex) + typename Operator_, + /// Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB> +struct DefaultMultistageMmaComplexCore< + Shape_, WarpShape_, InstructionShape_, + complex, layout::RowMajor, + complex, layout::RowMajor, + complex, LayoutC_, + arch::OpClassTensorOp, + Stages, + TransformA, TransformB, + Operator_, + CacheOpA, CacheOpB> { + + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using ElementA = complex; + using LayoutA = layout::RowMajor; + using ElementB = complex; + using LayoutB = layout::RowMajor; + using ElementC = complex; + using LayoutC = LayoutC_; + static int const kStages = Stages; + static ComplexTransform const kTransformA = TransformA; + static ComplexTransform const kTransformB = TransformB; + using Operator = Operator_; + static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; + + /// Number of warps present + using WarpCount = GemmShape; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); + + static_assert(WarpCount::kCount > 1, + "This specialization requires at least two warps."); + + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Size of a threadblock-scoped 128 + static int const kAccessSizeInBits = 128; + + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise128x4; + using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous128b; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape<8, 4>, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementA, SmemLayoutA, 1, + 
IteratorThreadMapA>; + + /// ThreadMap of iterator B + using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape<8, 4>, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementB, SmemLayoutB, 0, + IteratorThreadMapB>; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level tensor op + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + WarpShape, InstructionShape, + ElementA, SmemLayoutA, + ElementB, SmemLayoutB, + ElementC, LayoutC, + kTransformA, kTransformB, + Operator>::Type; + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy, + MatrixShape<0, 0>, WarpCount::kK>; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for complex floating-point +/// +/// A: column-major +/// B: column-major +/// Operator: arch::OpMultiplyAddComplex +/// Math Instruction: mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32 +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Layout of accumulator + typename LayoutC_, + /// Number of stages + int Stages, + /// Complex transformation on operand A + ComplexTransform TransformA, + /// Complex transformation on operand B + ComplexTransform TransformB, + /// Multiply-add operator (arch::OpMultiplyAddComplex) + typename Operator_, + /// Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB> +struct DefaultMultistageMmaComplexCore< + Shape_, WarpShape_, GemmShape<16, 8, 8>, + complex, layout::ColumnMajor, + complex, layout::ColumnMajor, + complex, LayoutC_, + arch::OpClassTensorOp, + Stages, + TransformA, TransformB, + Operator_, + CacheOpA, CacheOpB> { + + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = GemmShape<16, 8, 8>; + using ElementA = complex; + using LayoutA = layout::ColumnMajor; + using ElementB = complex; + using LayoutB = layout::ColumnMajor; + using ElementC = complex; + using LayoutC = LayoutC_; + static int const kStages = Stages; + static ComplexTransform const kTransformA = TransformA; + static ComplexTransform const kTransformB = TransformB; + using Operator = Operator_; + static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; + + /// Number of warps present + using WarpCount = GemmShape; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); + + static_assert(WarpCount::kCount > 1, + "This specialization requires at least two warps."); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Size of a threadblock-scoped + static int const kAccessSizeInBits = 64; + + // + // Shared memory layouts + // + + using SmemLayoutA = 
layout::ColumnMajorTensorOpMultiplicandCongruous64b; + + using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicand64bCrosswise; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearWarpStripedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape<16, 2>, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementA, SmemLayoutA, 1, + IteratorThreadMapA>; + + /// ThreadMap of iterator B + using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape<16, 2>, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementB, SmemLayoutB, 0, + IteratorThreadMapB>; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level tensor op + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + WarpShape, InstructionShape, + ElementA, SmemLayoutA, + ElementB, SmemLayoutB, + ElementC, LayoutC, + kTransformA, kTransformB, + Operator>::Type; + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy, + MatrixShape<0, 0>, WarpCount::kK>; +}; + + +/// Partial specialization for complex floating-point +/// +/// A: column-major +/// B: row-major +/// Operator: arch::OpMultiplyAddComplex +/// Math Instruction: mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32 +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Layout of accumulator + typename LayoutC_, + /// Number of stages + int Stages, + /// Complex transformation on operand A + ComplexTransform TransformA, + /// Complex transformation on operand B + ComplexTransform TransformB, + /// Multiply-add operator (arch::OpMultiplyAddComplex) + typename Operator_, + /// Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB> +struct DefaultMultistageMmaComplexCore< + Shape_, WarpShape_, GemmShape<16, 8, 8>, + complex, layout::ColumnMajor, + complex, layout::RowMajor, + complex, LayoutC_, + arch::OpClassTensorOp, + Stages, + TransformA, TransformB, + Operator_, + CacheOpA, CacheOpB> { + + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = GemmShape<16, 8, 8>; + using ElementA = complex; + using LayoutA = layout::ColumnMajor; + using ElementB = complex; + using LayoutB = layout::RowMajor; + using ElementC = complex; + using LayoutC = LayoutC_; + static int const kStages = Stages; + static ComplexTransform const kTransformA = TransformA; + static ComplexTransform const kTransformB = TransformB; + using Operator = Operator_; + static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; + + /// Number of warps present + using WarpCount = GemmShape; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be 
divisible by warp-scoped GEMM size."); + + static_assert(WarpCount::kCount > 1, + "This specialization requires at least two warps."); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Size of a threadblock-scoped + static int const kAccessSizeInBits = 64; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous64b; + + using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous64b; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearWarpStripedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape<16, 2>, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementA, SmemLayoutA, 1, + IteratorThreadMapA>; + + /// ThreadMap of iterator B + using IteratorThreadMapB = transform::PitchLinearWarpStripedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape<16, 2>, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementB, SmemLayoutB, 0, + IteratorThreadMapB>; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level tensor op + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + WarpShape, InstructionShape, + ElementA, SmemLayoutA, + ElementB, SmemLayoutB, + ElementC, LayoutC, + kTransformA, kTransformB, + Operator>::Type; + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy, + MatrixShape<0, 0>, WarpCount::kK>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for complex floating-point +/// +/// A: row-major +/// B: column-major +/// Operator: arch::OpMultiplyAddComplex +/// Math Instruction: mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32 +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Layout of accumulator + typename LayoutC_, + /// Number of stages + int Stages, + /// Complex transformation on operand A + ComplexTransform TransformA, + /// Complex transformation on operand B + ComplexTransform TransformB, + /// Multiply-add operator (arch::OpMultiplyAddComplex) + typename Operator_, + /// Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB> +struct DefaultMultistageMmaComplexCore< + Shape_, WarpShape_, GemmShape<16, 8, 8>, + complex, layout::RowMajor, + complex, layout::ColumnMajor, + complex, LayoutC_, + arch::OpClassTensorOp, + Stages, + TransformA, TransformB, + Operator_, + CacheOpA, CacheOpB> { + + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = GemmShape<16, 8, 8>; + using ElementA = complex; + using LayoutA = layout::RowMajor; + using ElementB = complex; + using LayoutB = layout::ColumnMajor; + using ElementC = complex; + using LayoutC = LayoutC_; + static int const kStages = Stages; + static ComplexTransform 
const kTransformA = TransformA; + static ComplexTransform const kTransformB = TransformB; + using Operator = Operator_; + static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; + + /// Number of warps present + using WarpCount = GemmShape; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); + + static_assert(WarpCount::kCount > 1, + "This specialization requires at least two warps."); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Size of a threadblock-scoped + static int const kAccessSizeInBits = 64; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::RowMajorTensorOpMultiplicand64bCrosswise; + + using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicand64bCrosswise; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape<16, 2>, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementA, SmemLayoutA, 1, + IteratorThreadMapA>; + + /// ThreadMap of iterator B + using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape<16, 2>, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementB, SmemLayoutB, 0, + IteratorThreadMapB>; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level tensor op + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + WarpShape, InstructionShape, + ElementA, SmemLayoutA, + ElementB, SmemLayoutB, + ElementC, LayoutC, + kTransformA, kTransformB, + Operator>::Type; + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy, + MatrixShape<0, 0>, WarpCount::kK>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for complex floating-point +/// +/// A: row-major +/// B: row-major +/// Operator: arch::OpMultiplyAddComplex +/// Math Instruction: mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32 +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + /// Layout of accumulator + typename LayoutC_, + /// Number of stages + int Stages, + /// Complex transformation on operand A + ComplexTransform TransformA, + /// Complex transformation on operand B + ComplexTransform TransformB, + /// Multiply-add operator (arch::OpMultiplyAddComplex) + typename Operator_, + /// Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB> +struct DefaultMultistageMmaComplexCore< + Shape_, WarpShape_, GemmShape<16, 8, 8>, + 
complex, layout::RowMajor, + complex, layout::RowMajor, + complex, LayoutC_, + arch::OpClassTensorOp, + Stages, + TransformA, TransformB, + Operator_, + CacheOpA, CacheOpB> { + + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = GemmShape<16, 8, 8>; + using ElementA = complex; + using LayoutA = layout::RowMajor; + using ElementB = complex; + using LayoutB = layout::RowMajor; + using ElementC = complex; + using LayoutC = LayoutC_; + static int const kStages = Stages; + static ComplexTransform const kTransformA = TransformA; + static ComplexTransform const kTransformB = TransformB; + using Operator = Operator_; + static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; + + /// Number of warps present + using WarpCount = GemmShape; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); + + static_assert(WarpCount::kCount > 1, + "This specialization requires at least two warps."); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Size of a threadblock-scoped + static int const kAccessSizeInBits = 64; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::RowMajorTensorOpMultiplicand64bCrosswise; + + using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous64b; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape<16, 2>, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementA, SmemLayoutA, 1, + IteratorThreadMapA>; + + /// ThreadMap of iterator B + using IteratorThreadMapB = transform::PitchLinearWarpStripedThreadMap< + layout::PitchLinearShape, kThreads, + layout::PitchLinearShape<16, 2>, + kAccessSizeInBits / sizeof_bits::value>; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementB, SmemLayoutB, 0, + IteratorThreadMapB>; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level tensor op + using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< + WarpShape, InstructionShape, + ElementA, SmemLayoutA, + ElementB, SmemLayoutB, + ElementC, LayoutC, + kTransformA, kTransformB, + Operator>::Type; + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy, + MatrixShape<0, 0>, WarpCount::kK>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for complex SIMT operation +/// +/// A: column-major +/// B: column-major +/// Operator: arch::OpMultiplyAddComplex or arch::OpMultiplyGaussianComplex +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + typename RealA, + typename RealB, + typename RealC, + /// Layout 
of accumulator + typename LayoutC_, + /// Number of stages + int Stages, + /// Complex transformation on operand A + ComplexTransform TransformA, + /// Complex transformation on operand B + ComplexTransform TransformB, + /// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex) + typename Operator_, + /// Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB> +struct DefaultMultistageMmaComplexCore< + Shape_, WarpShape_, GemmShape<1, 1, 1>, + complex, layout::ColumnMajor, + complex, layout::ColumnMajor, + complex, LayoutC_, + arch::OpClassSimt, + Stages, + TransformA, TransformB, + Operator_, + CacheOpA, CacheOpB> { + + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = GemmShape<1, 1, 1>; + using ElementA = complex; + using LayoutA = layout::ColumnMajor; + using ElementB = complex; + using LayoutB = layout::ColumnMajor; + using ElementC = complex; + using LayoutC = LayoutC_; + static int const kStages = Stages; + static ComplexTransform const kTransformA = TransformA; + static ComplexTransform const kTransformB = TransformB; + using Operator = Operator_; + static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; + + /// Number of warps present + using WarpCount = GemmShape; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); + + static_assert(WarpCount::kCount > 1, + "This specialization requires at least two warps."); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Size of access + static int const kAccessSizeInBits = sizeof_bits::value; + + /// No vectorized accesses + static int const kElementsPerAccess = 1; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::ColumnMajor; + + using SmemLayoutB = layout::RowMajor; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap< + layout::PitchLinearShape, + kThreads, + kElementsPerAccess + >; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementA, SmemLayoutA, 0, + IteratorThreadMapA>; + + /// Policy of iterator B + using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap< + layout::PitchLinearShape, + kThreads, + kElementsPerAccess + >; + + /// Transpose the ThreadMap of iterator B + using SmemThreadMapB = transform::TransposePitchLinearThreadMapSimt; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementB, SmemLayoutB, 1, + SmemThreadMapB>; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level op + static const int WarpNumThreadsM = 4; + static const int WarpNumThreadsN = 8; + static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN), + "WarpShape must be divisible by ThreadTile shape."); + static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM; + static const int ThreadTileN = WarpShape::kN / 
WarpNumThreadsN; + static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1; + static const int numElementsA = 128 / sizeof_bits::value; + static const int numElementsB = 128 / sizeof_bits::value; + static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM); + static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN); + // these should have max of thread tile also + using LaneMmaShape = cutlass::gemm::GemmShape< + LaneM, + LaneN, + 1>; + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape, // WarpShape + cutlass::layout::RowMajorInterleaved, // LaneLayout + LaneMmaShape + >; + + using MmaWarpSimt = cutlass::gemm::warp::MmaSimt< + WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8 + ElementA, /// Data type of A elements + SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout) + ElementB, /// Data type of B elements + SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout) + ElementC, /// Element type of C matrix + LayoutC, /// Layout of C matrix (concept: MatrixLayout) + Policy, /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) + 1, /// 1 partition along K dimension + kTransformA, /// Transform for A + kTransformB /// Transform for B + >; /// Used for partial specialization + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy< + MmaWarpSimt, + MatrixShape<0, 0>, + MatrixShape<0, Shape::kK / 32>, + WarpCount::kK>; +}; + +/// Partial specialization for complex SIMT operation +/// +/// A: column-major +/// B: row-major +/// Operator: arch::OpMultiplyAddComplex or arch::OpMultiplyGaussianComplex +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + typename RealA, + typename RealB, + typename RealC, + /// Layout of accumulator + typename LayoutC_, + /// Number of stages + int Stages, + /// Complex transformation on operand A + ComplexTransform TransformA, + /// Complex transformation on operand B + ComplexTransform TransformB, + /// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex) + typename Operator_, + /// Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB> +struct DefaultMultistageMmaComplexCore< + Shape_, WarpShape_, GemmShape<1, 1, 1>, + complex, layout::ColumnMajor, + complex, layout::RowMajor, + complex, LayoutC_, + arch::OpClassSimt, + Stages, + TransformA, TransformB, + Operator_, + CacheOpA, CacheOpB> { + + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = GemmShape<1, 1, 1>; + using ElementA = complex; + using LayoutA = layout::ColumnMajor; + using ElementB = complex; + using LayoutB = layout::RowMajor; + using ElementC = complex; + using LayoutC = LayoutC_; + static int const kStages = Stages; + static ComplexTransform const kTransformA = TransformA; + static ComplexTransform const kTransformB = TransformB; + using Operator = Operator_; + static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; + + /// Number of warps present + using WarpCount = GemmShape; + + // Divisility requirements + 
static_assert( + !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); + + static_assert(WarpCount::kCount > 1, + "This specialization requires at least two warps."); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Size of access + static int const kAccessSizeInBits = sizeof_bits::value; + + /// No vectorized accesses + static int const kElementsPerAccess = 1; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::ColumnMajor; + + using SmemLayoutB = layout::RowMajor; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap< + layout::PitchLinearShape, + kThreads, + kElementsPerAccess + >; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementA, SmemLayoutA, 0, + IteratorThreadMapA>; + + /// Policy of iterator B + using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap< + layout::PitchLinearShape, + kThreads, + kElementsPerAccess + >; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementB, SmemLayoutB, 1, + IteratorThreadMapB>; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level op + static const int WarpNumThreadsM = 4; + static const int WarpNumThreadsN = 8; + static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN), + "WarpShape must be divisible by ThreadTile shape."); + static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM; + static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN; + static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 
2 : 1; + static const int numElementsA = 128 / sizeof_bits::value; + static const int numElementsB = 128 / sizeof_bits::value; + static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM); + static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN); + // these should have max of thread tile also + using LaneMmaShape = cutlass::gemm::GemmShape< + LaneM, + LaneN, + 1>; + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape, // WarpShape + cutlass::layout::RowMajorInterleaved, // LaneLayout + LaneMmaShape + >; + + using MmaWarpSimt = cutlass::gemm::warp::MmaSimt< + WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8 + ElementA, /// Data type of A elements + SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout) + ElementB, /// Data type of B elements + SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout) + ElementC, /// Element type of C matrix + LayoutC, /// Layout of C matrix (concept: MatrixLayout) + Policy, /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) + 1, /// 1 partition along K dimension + kTransformA, /// Transform for A + kTransformB /// Transform for B + >; /// Used for partial specialization + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy< + MmaWarpSimt, + MatrixShape<0, 0>, + MatrixShape<0, 0>, // or Shape::kK / 32 + WarpCount::kK>; +}; + +/// Partial specialization for complex SIMT operation +/// +/// A: row-major +/// B: column-major +/// Operator: arch::OpMultiplyAddComplex or arch::OpMultiplyGaussianComplex +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + typename RealA, + typename RealB, + typename RealC, + /// Layout of accumulator + typename LayoutC_, + /// Number of stages + int Stages, + /// Complex transformation on operand A + ComplexTransform TransformA, + /// Complex transformation on operand B + ComplexTransform TransformB, + /// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex) + typename Operator_, + /// Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB> +struct DefaultMultistageMmaComplexCore< + Shape_, WarpShape_, GemmShape<1, 1, 1>, + complex, layout::RowMajor, + complex, layout::ColumnMajor, + complex, LayoutC_, + arch::OpClassSimt, + Stages, + TransformA, TransformB, + Operator_, + CacheOpA, CacheOpB> { + + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = GemmShape<1, 1, 1>; + using ElementA = complex; + using LayoutA = layout::RowMajor; + using ElementB = complex; + using LayoutB = layout::ColumnMajor; + using ElementC = complex; + using LayoutC = LayoutC_; + static int const kStages = Stages; + static ComplexTransform const kTransformA = TransformA; + static ComplexTransform const kTransformB = TransformB; + using Operator = Operator_; + static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; + + /// Number of warps present + using WarpCount = GemmShape; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), + 
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); + + static_assert(WarpCount::kCount > 1, + "This specialization requires at least two warps."); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Size of access + static int const kAccessSizeInBits = sizeof_bits::value; + + /// No vectorized accesses + static int const kElementsPerAccess = 1; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::ColumnMajor; + + using SmemLayoutB = layout::RowMajor; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap< + layout::PitchLinearShape, + kThreads, + kElementsPerAccess + >; + + /// Transpose the ThreadMap of iterator A + using SmemThreadMapA = transform::TransposePitchLinearThreadMapSimt; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementA, SmemLayoutA, 0, + SmemThreadMapA>; + + /// Policy of iterator B + using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap< + layout::PitchLinearShape, + kThreads, + kElementsPerAccess + >; + + /// Transpose the ThreadMap of iterator B + using SmemThreadMapB = transform::TransposePitchLinearThreadMapSimt; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementB, SmemLayoutB, 1, + SmemThreadMapB>; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level op + static const int WarpNumThreadsM = 4; + static const int WarpNumThreadsN = 8; + static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN), + "WarpShape must be divisible by ThreadTile shape."); + static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM; + static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN; + static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 
2 : 1; + static const int numElementsA = 128 / sizeof_bits::value; + static const int numElementsB = 128 / sizeof_bits::value; + static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM); + static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN); + // these should have max of thread tile also + using LaneMmaShape = cutlass::gemm::GemmShape< + LaneM, + LaneN, + 1>; + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape, // WarpShape + cutlass::layout::RowMajorInterleaved, // LaneLayout + LaneMmaShape + >; + + using MmaWarpSimt = cutlass::gemm::warp::MmaSimt< + WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8 + ElementA, /// Data type of A elements + SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout) + ElementB, /// Data type of B elements + SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout) + ElementC, /// Element type of C matrix + LayoutC, /// Layout of C matrix (concept: MatrixLayout) + Policy, /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) + 1, /// 1 partition along K dimension + kTransformA, /// Transform for A + kTransformB /// Transform for B + >; /// Used for partial specialization + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy< + MmaWarpSimt, + MatrixShape, + MatrixShape<0, Shape::kK / 32>, + WarpCount::kK>; +}; + +/// Partial specialization for complex SIMT operation +/// +/// A: row-major +/// B: row-major +/// Operator: arch::OpMultiplyAddComplex or arch::OpMultiplyGaussianComplex +/// +/// This uses the default warp-level operator given tile sizes +template < + /// Shape of threadblock-scoped matrix multiply operator (concept: + /// GemmShape) + typename Shape_, + /// Shape of warp-level matrix multiply operator (concept: GemmShape) + typename WarpShape_, + typename RealA, + typename RealB, + typename RealC, + /// Layout of accumulator + typename LayoutC_, + /// Number of stages + int Stages, + /// Complex transformation on operand A + ComplexTransform TransformA, + /// Complex transformation on operand B + ComplexTransform TransformB, + /// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex) + typename Operator_, + /// Cache operation of operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Cache operation of operand B + cutlass::arch::CacheOperation::Kind CacheOpB> +struct DefaultMultistageMmaComplexCore< + Shape_, WarpShape_, GemmShape<1, 1, 1>, + complex, layout::RowMajor, + complex, layout::RowMajor, + complex, LayoutC_, + arch::OpClassSimt, + Stages, + TransformA, TransformB, + Operator_, + CacheOpA, CacheOpB> { + + using Shape = Shape_; + using WarpShape = WarpShape_; + using InstructionShape = GemmShape<1, 1, 1>; + using ElementA = complex; + using LayoutA = layout::RowMajor; + using ElementB = complex; + using LayoutB = layout::RowMajor; + using ElementC = complex; + using LayoutC = LayoutC_; + static int const kStages = Stages; + static ComplexTransform const kTransformA = TransformA; + static ComplexTransform const kTransformB = TransformB; + using Operator = Operator_; + static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; + + /// Number of warps present + using WarpCount = GemmShape; + + // Divisility requirements + static_assert( + !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), + "Threadblock-scoped GEMM should 
be divisible by warp-scoped GEMM size."); + + static_assert(WarpCount::kCount > 1, + "This specialization requires at least two warps."); + + /// Number of threads per warp + static int const kWarpSize = warp::WarpSize::value; + + /// Number of threads total + static int const kThreads = WarpCount::kCount * kWarpSize; + + /// Size of access + static int const kAccessSizeInBits = sizeof_bits::value; + + /// No vectorized accesses + static int const kElementsPerAccess = 1; + + // + // Shared memory layouts + // + + using SmemLayoutA = layout::ColumnMajor; + + using SmemLayoutB = layout::RowMajor; + + // + // Iterators to write to shared memory + // + + /// ThreadMap of iterator A + using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap< + layout::PitchLinearShape, + kThreads, + kElementsPerAccess + >; + + /// Transpose the ThreadMap of iterator A + using SmemThreadMapA = transform::TransposePitchLinearThreadMapSimt; + + /// Shared memory iterator to A operand + using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementA, SmemLayoutA, 0, + SmemThreadMapA>; + + /// Policy of iterator B + using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap< + layout::PitchLinearShape, + kThreads, + kElementsPerAccess + >; + + /// Shared memory iterator to B operand + using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< + MatrixShape, ElementB, SmemLayoutB, 1, + IteratorThreadMapB>; + + // + // Warp-level matrix multiply operator + // + + // Define the warp-level op + static const int WarpNumThreadsM = 4; + static const int WarpNumThreadsN = 8; + static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN), + "WarpShape must be divisible by ThreadTile shape."); + static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM; + static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN; + static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 
2 : 1; + static const int numElementsA = 128 / sizeof_bits::value; + static const int numElementsB = 128 / sizeof_bits::value; + static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM); + static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN); + // these should have max of thread tile also + using LaneMmaShape = cutlass::gemm::GemmShape< + LaneM, + LaneN, + 1>; + using Policy = cutlass::gemm::warp::MmaSimtPolicy< + cutlass::MatrixShape, // WarpShape + cutlass::layout::RowMajorInterleaved, // LaneLayout + LaneMmaShape + >; + + using MmaWarpSimt = cutlass::gemm::warp::MmaSimt< + WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8 + ElementA, /// Data type of A elements + SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout) + ElementB, /// Data type of B elements + SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout) + ElementC, /// Element type of C matrix + LayoutC, /// Layout of C matrix (concept: MatrixLayout) + Policy, /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) + 1, /// 1 partition along K dimension + kTransformA, /// Transform for A + kTransformB /// Transform for B + >; /// Used for partial specialization + + /// Policy used to define MmaPipelined + using MmaPolicy = MmaPolicy< + MmaWarpSimt, + MatrixShape, + MatrixShape<0, 0>, // or Shape::kK / 32 + WarpCount::kK>; +}; + +//////////////////////////////////////////////////////////////////////////////// + + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_multistage_trmm_complex.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_multistage_trmm_complex.h new file mode 100644 index 0000000000000000000000000000000000000000..367869ec2ea28a465abe0e4454c3a3b063bcb098 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_multistage_trmm_complex.h @@ -0,0 +1,556 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief Template for a multistage GEMM kernel. Does not compute batching or support split-K. + + +*/ + +#pragma once + +#include "cutlass/blas3.h" +#include "cutlass/arch/arch.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm80.h" +#include "cutlass/numeric_types.h" +#include "cutlass/transform/threadblock/predicated_tile_iterator_triangular_matrix.h" +#include "cutlass/gemm/threadblock/mma_blas3_multistage.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +template < + /// Element type for A matrix operand + typename ElementA_, + /// Layout type for A matrix operand + typename LayoutA_, + /// Element type for B matrix operand + typename ElementB_, + /// Layout type for B matrix operand + typename LayoutB_, + /// Side Mode for the kernel + SideMode kSideMode, + /// Fill Mode for the triangular matrix + FillMode kFillMode, + /// Diag Type for the triangular matrix + DiagType kDiagType, + /// Element type for internal accumulation + typename ElementAccumulator_, + /// Layout type for C and D matrix operands + typename LayoutC_, + /// Operator class tag + typename OperatorClass_, + /// Tag indicating architecture to tune for + typename ArchTag_, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape_, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape_, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape_, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Complex transformation on operand A + ComplexTransform TransformA = ComplexTransform::kNone, + /// Complex transformation on operand B + ComplexTransform TransformB = ComplexTransform::kNone, + /// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex) + typename Operator = arch::OpMultiplyAddComplex, + /// Blas3 computation mode + BlasMode BlasMode_ = BlasMode::kTriangular, + /// Store the accumulators in row major or column major. Row major is used + /// when output layout is interleaved. 
+ bool AccumulatorsInRowMajor = false> +struct DefaultMultistageTrmmComplex; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization for row-major output +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Side Mode for the kernel + SideMode kSideMode, + /// Fill Mode for the triangular matrix + FillMode kFillMode, + /// Diag Type for the triangular matrix + DiagType kDiagType, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Tag indicating architecture to tune for + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape, + /// Number of stages used in the multistage mainloop + int Stages, + /// Complex transformation on operand A + ComplexTransform TransformA, + /// Complex transformation on operand B + ComplexTransform TransformB, + /// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex) + typename Operator> +struct DefaultMultistageTrmmComplex { + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplexCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementAccumulator, layout::RowMajor, OperatorClass, + Stages, TransformA, TransformB, Operator>; + + // Define iterators over tiles from the A operand + using ThreadMapA = typename MmaCore::IteratorThreadMapA; + using AccessTypeA = cutlass::Array; + using IteratorA = + cutlass::transform::threadblock::PredicatedTileAccessIteratorTriangularMatrix< + cutlass::MatrixShape, + ElementA, LayoutA, 1, ThreadMapA, + kSideMode, kFillMode, kDiagType, + AccessTypeA>; + + // Define iterators over tiles from the B operand + using ThreadMapB = typename MmaCore::IteratorThreadMapB; + using AccessTypeB = cutlass::Array; + using IteratorB = + cutlass::transform::threadblock::PredicatedTileAccessIteratorTriangularMatrix< + cutlass::MatrixShape, + ElementB, LayoutB, 0, ThreadMapB, + kSideMode, FillMode::kFull, DiagType::kInvalid, + AccessTypeB>; + + // Define the threadblock-scoped multistage matrix multiply + using ThreadblockMma = cutlass::gemm::threadblock::MmaMultistage< + typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, + MmaCore::kCacheOpA, IteratorB, typename MmaCore::SmemIteratorB, + MmaCore::kCacheOpB, ElementAccumulator, layout::RowMajor, + typename MmaCore::MmaPolicy, Stages, SharedMemoryClearOption::kZfill>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization for row-major output and right-side mode +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Fill Mode for the triangular matrix + FillMode kFillMode, + /// Diag Type for the triangular matrix + DiagType kDiagType, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Tag indicating architecture to 
tune for + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape, + /// Number of stages used in the multistage mainloop + int Stages, + /// Complex transformation on operand A + ComplexTransform TransformA, + /// Complex transformation on operand B + ComplexTransform TransformB, + /// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex) + typename Operator> +struct DefaultMultistageTrmmComplex { + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplexCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementAccumulator, layout::RowMajor, OperatorClass, + Stages, TransformA, TransformB, Operator>; + + // Define iterators over tiles from the A operand + using ThreadMapA = typename MmaCore::IteratorThreadMapA; + using AccessTypeA = cutlass::Array; + using IteratorA = + cutlass::transform::threadblock::PredicatedTileAccessIteratorTriangularMatrix< + cutlass::MatrixShape, + ElementA, LayoutA, 1, ThreadMapA, + SideMode::kRight, FillMode::kFull, DiagType::kInvalid, + AccessTypeA>; + + // Define iterators over tiles from the B operand + using ThreadMapB = typename MmaCore::IteratorThreadMapB; + using AccessTypeB = cutlass::Array; + using IteratorB = + cutlass::transform::threadblock::PredicatedTileAccessIteratorTriangularMatrix< + cutlass::MatrixShape, + ElementB, LayoutB, 0, ThreadMapB, + SideMode::kRight, kFillMode, kDiagType, + AccessTypeB>; + + // Define the threadblock-scoped multistage matrix multiply + using ThreadblockMma = cutlass::gemm::threadblock::MmaMultistage< + typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, + MmaCore::kCacheOpA, IteratorB, typename MmaCore::SmemIteratorB, + MmaCore::kCacheOpB, ElementAccumulator, layout::RowMajor, + typename MmaCore::MmaPolicy, Stages, SharedMemoryClearOption::kZfill>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization for row-major output with unit diagonal +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Side Mode for the kernel + SideMode kSideMode, + /// Fill Mode for the triangular matrix + FillMode kFillMode, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Tag indicating architecture to tune for + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape, + /// Number of stages used in the multistage mainloop + int Stages, + /// Complex transformation on operand A + ComplexTransform TransformA, + /// Complex transformation on operand B + ComplexTransform TransformB, + /// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex) + typename Operator> +struct DefaultMultistageTrmmComplex { + // Define the MmaCore components + using 
MmaCore = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplexCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementAccumulator, layout::RowMajor, OperatorClass, + Stages, TransformA, TransformB, Operator>; + + // Define iterators over tiles from the A operand + using ThreadMapA = typename MmaCore::IteratorThreadMapA; + using AccessTypeA = cutlass::Array; + using IteratorA = + cutlass::transform::threadblock::PredicatedTileAccessIteratorTriangularMatrix< + cutlass::MatrixShape, + ElementA, LayoutA, 1, ThreadMapA, + kSideMode, kFillMode, DiagType::kUnit, + AccessTypeA>; + + // Define iterators over tiles from the B operand + using ThreadMapB = typename MmaCore::IteratorThreadMapB; + using AccessTypeB = cutlass::Array; + using IteratorB = + cutlass::transform::threadblock::PredicatedTileAccessIteratorTriangularMatrix< + cutlass::MatrixShape, + ElementB, LayoutB, 0, ThreadMapB, + kSideMode, FillMode::kFull, DiagType::kInvalid, + AccessTypeB>; + + // Define the threadblock-scoped multistage matrix multiply + using ThreadblockMma = cutlass::gemm::threadblock::MmaBlas3Multistage< + typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, + MmaCore::kCacheOpA, IteratorB, typename MmaCore::SmemIteratorB, + MmaCore::kCacheOpB, ElementAccumulator, layout::RowMajor, + typename MmaCore::MmaPolicy, Stages, SharedMemoryClearOption::kZfill>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization for row-major output and right-side mode, unit diagonal +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Fill Mode for the triangular matrix + FillMode kFillMode, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Tag indicating architecture to tune for + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape, + /// Number of stages used in the multistage mainloop + int Stages, + /// Complex transformation on operand A + ComplexTransform TransformA, + /// Complex transformation on operand B + ComplexTransform TransformB, + /// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex) + typename Operator> +struct DefaultMultistageTrmmComplex { + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplexCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementAccumulator, layout::RowMajor, OperatorClass, + Stages, TransformA, TransformB, Operator>; + + // Define iterators over tiles from the A operand + using ThreadMapA = typename MmaCore::IteratorThreadMapA; + using AccessTypeA = cutlass::Array; + using IteratorA = + cutlass::transform::threadblock::PredicatedTileAccessIteratorTriangularMatrix< + cutlass::MatrixShape, + ElementA, LayoutA, 1, ThreadMapA, + SideMode::kRight, FillMode::kFull, DiagType::kInvalid, + AccessTypeA>; + + // Define iterators over tiles from the B operand + using ThreadMapB = typename MmaCore::IteratorThreadMapB; + using AccessTypeB = 
cutlass::Array; + using IteratorB = + cutlass::transform::threadblock::PredicatedTileAccessIteratorTriangularMatrix< + cutlass::MatrixShape, + ElementB, LayoutB, 0, ThreadMapB, + SideMode::kRight, kFillMode, DiagType::kUnit, + AccessTypeB>; + + // Define the threadblock-scoped multistage matrix multiply + using ThreadblockMma = cutlass::gemm::threadblock::MmaBlas3Multistage< + typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, + MmaCore::kCacheOpA, IteratorB, typename MmaCore::SmemIteratorB, + MmaCore::kCacheOpB, ElementAccumulator, layout::RowMajor, + typename MmaCore::MmaPolicy, Stages, SharedMemoryClearOption::kZfill>; +}; + + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization for row-major output (for TRMM where diagonal imag part is ignored - used by HEMM) +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Side Mode for the kernel + SideMode kSideMode, + /// Fill Mode for the triangular matrix + FillMode kFillMode, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Tag indicating architecture to tune for + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape, + /// Number of stages used in the multistage mainloop + int Stages, + /// Complex transformation on operand A + ComplexTransform TransformA, + /// Complex transformation on operand B + ComplexTransform TransformB, + /// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex) + typename Operator> +struct DefaultMultistageTrmmComplex { + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplexCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementAccumulator, layout::RowMajor, OperatorClass, + Stages, TransformA, TransformB, Operator>; + + // Define iterators over tiles from the A operand + // PredicatedTileAccessIteratorTriangularMatrix only tracks diagonal elements, + // when DiagType is kUnit + using ThreadMapA = typename MmaCore::IteratorThreadMapA; + using AccessTypeA = cutlass::Array; + using IteratorA = + cutlass::transform::threadblock::PredicatedTileAccessIteratorTriangularMatrix< + cutlass::MatrixShape, + ElementA, LayoutA, 1, ThreadMapA, + kSideMode, kFillMode, DiagType::kUnit, + AccessTypeA>; + + // Define iterators over tiles from the B operand + using ThreadMapB = typename MmaCore::IteratorThreadMapB; + using AccessTypeB = cutlass::Array; + using IteratorB = + cutlass::transform::threadblock::PredicatedTileAccessIteratorTriangularMatrix< + cutlass::MatrixShape, + ElementB, LayoutB, 0, ThreadMapB, + kSideMode, FillMode::kFull, DiagType::kInvalid, + AccessTypeB>; + + // Define the threadblock-scoped multistage matrix multiply + using ThreadblockMma = cutlass::gemm::threadblock::MmaBlas3Multistage< + typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, + MmaCore::kCacheOpA, IteratorB, typename MmaCore::SmemIteratorB, + MmaCore::kCacheOpB, ElementAccumulator, layout::RowMajor, + typename 
MmaCore::MmaPolicy, Stages, SharedMemoryClearOption::kZfill, + BlasMode::kHermitian>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization for row-major output and right-side mode (for TRMM where diagonal imag part is ignored - used by HEMM) +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Fill Mode for the triangular matrix + FillMode kFillMode, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Tag indicating architecture to tune for + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape, + /// Number of stages used in the multistage mainloop + int Stages, + /// Complex transformation on operand A + ComplexTransform TransformA, + /// Complex transformation on operand B + ComplexTransform TransformB, + /// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex) + typename Operator> +struct DefaultMultistageTrmmComplex { + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplexCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementAccumulator, layout::RowMajor, OperatorClass, + Stages, TransformA, TransformB, Operator>; + + // Define iterators over tiles from the A operand + using ThreadMapA = typename MmaCore::IteratorThreadMapA; + using AccessTypeA = cutlass::Array; + using IteratorA = + cutlass::transform::threadblock::PredicatedTileAccessIteratorTriangularMatrix< + cutlass::MatrixShape, + ElementA, LayoutA, 1, ThreadMapA, + SideMode::kRight, FillMode::kFull, DiagType::kInvalid, + AccessTypeA>; + + // Define iterators over tiles from the B operand + // PredicatedTileAccessIteratorTriangularMatrix only tracks diagonal elements, + // when DiagType is kUnit + using ThreadMapB = typename MmaCore::IteratorThreadMapB; + using AccessTypeB = cutlass::Array; + using IteratorB = + cutlass::transform::threadblock::PredicatedTileAccessIteratorTriangularMatrix< + cutlass::MatrixShape, + ElementB, LayoutB, 0, ThreadMapB, + SideMode::kRight, kFillMode, DiagType::kUnit, + AccessTypeB>; + + // Define the threadblock-scoped multistage matrix multiply + using ThreadblockMma = cutlass::gemm::threadblock::MmaBlas3Multistage< + typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, + MmaCore::kCacheOpA, IteratorB, typename MmaCore::SmemIteratorB, + MmaCore::kCacheOpB, ElementAccumulator, layout::RowMajor, + typename MmaCore::MmaPolicy, Stages, SharedMemoryClearOption::kZfill, + BlasMode::kHermitian>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_sparse_mma.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_sparse_mma.h new file mode 100644 
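// The DefaultMultistageTrmmComplex specializations above differ only in which
// operand receives the triangular-matrix iterator (SideMode), which half of
// that operand is stored (FillMode), whether its diagonal is implicit
// (DiagType::kUnit, which routes to the Blas3 mainloop), and whether the
// diagonal's imaginary part is ignored (BlasMode::kHermitian).  As a minimal
// sketch of those semantics only -- not CUTLASS code, with made-up enum names
// mirroring the ones above -- a host-side reference TRMM looks like this:
#include <vector>

enum class Side { kLeft, kRight };
enum class Fill { kLower, kUpper };
enum class Diag { kNonUnit, kUnit };

// C = A * B (Side::kLeft) or C = B * A (Side::kRight), A triangular, row-major.
void reference_trmm(Side side, Fill fill, Diag diag, int m, int n,
                    const std::vector<float> &A,   // dim x dim, dim = m or n
                    const std::vector<float> &B,   // m x n
                    std::vector<float> &C) {       // m x n
  int dim = (side == Side::kLeft) ? m : n;
  auto a = [&](int r, int c) -> float {
    if (r == c) return (diag == Diag::kUnit) ? 1.0f : A[r * dim + c];
    bool stored = (fill == Fill::kLower) ? (r > c) : (r < c);
    return stored ? A[r * dim + c] : 0.0f;         // unstored half is zero
  };
  for (int i = 0; i < m; ++i)
    for (int j = 0; j < n; ++j) {
      float acc = 0.0f;
      for (int k = 0; k < dim; ++k)
        acc += (side == Side::kLeft) ? a(i, k) * B[k * n + j]
                                     : B[i * n + k] * a(k, j);
      C[i * n + j] = acc;
    }
}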
index 0000000000000000000000000000000000000000..5faa76bea6f08707371e892069307ffb37db068b --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_sparse_mma.h @@ -0,0 +1,196 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Template for a pipelined GEMM kernel. Does not compute batching or support split-K. 
+*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/numeric_types.h" +#include "cutlass/arch/arch.h" +#include "cutlass/arch/wmma.h" + +#include "cutlass/layout/matrix.h" +#include "cutlass/transform/threadblock/predicated_tile_iterator.h" +#include "cutlass/transform/threadblock/predicated_tile_iterator_2dthreadtile.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm70.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm75.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm80.h" +#include "cutlass/gemm/threadblock/default_mma_core_sparse_sm80.h" +#if defined(CUTLASS_ARCH_WMMA_ENABLED) +#include "cutlass/gemm/threadblock/default_mma_core_wmma.h" +#endif //CUTLASS_ARCH_WMMA_ENABLED + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +template < + /// Element type for A matrix operand + typename ElementA_, + /// Layout type for A matrix operand + typename LayoutA_, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB_, + /// Layout type for B matrix operand + typename LayoutB_, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for internal accumulation + typename ElementAccumulator_, + /// Layout type for C and D matrix operands + typename LayoutC_, + /// Operator class tag + typename OperatorClass_, + /// Tag indicating architecture to tune for + typename ArchTag_, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape_, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape_, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape_, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Operation perfomed by GEMM + typename Operator, + /// Store the accumulators in row major or column major. Row major is used + /// when output layout is interleaved. + bool AccumulatorsInRowMajor = false + > +struct DefaultSparseMma; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization for row-major output (OperatorClass TensorOp) +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape, + /// Number of stages used in the multistage mainloop + int Stages, + /// Operation perfomed by GEMM + typename Operator + > +struct DefaultSparseMma { + static cutlass::arch::CacheOperation::Kind const CacheOpA = + ((sizeof_bits::value * kAlignmentA) == 128) + ? 
cutlass::arch::CacheOperation::Global + : cutlass::arch::CacheOperation::Always; + + static cutlass::arch::CacheOperation::Kind const CacheOpB = + ((sizeof_bits::value * kAlignmentB) == 128) + ? cutlass::arch::CacheOperation::Global + : cutlass::arch::CacheOperation::Always; + + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, + Stages, Operator, false, CacheOpA, CacheOpB>; + + static int const kSparse = MmaCore::kSparse; + + // Define iterators over tiles from the A operand + using ThreadMapA = typename MmaCore::IteratorThreadMapA; + using AccessTypeA = cutlass::Array; + using IteratorA = + cutlass::transform::threadblock::PredicatedTileAccessIterator< + cutlass::MatrixShape, + ElementA, LayoutA, 1, ThreadMapA, AccessTypeA>; + + // Define iterators over tiles from the B operand + using ThreadMapB = typename MmaCore::IteratorThreadMapB; + using AccessTypeB = cutlass::Array; + using IteratorB = + cutlass::transform::threadblock::PredicatedTileAccessIterator< + cutlass::MatrixShape, + ElementB, LayoutB, 0, ThreadMapB, AccessTypeB>; + + // Define iterators over tiles from the E operand + using ElementE = typename MmaCore::ElementE; + using LayoutE = typename MmaCore::GmemLayoutE; + using ThreadMapE = typename MmaCore::IteratorThreadMapE; + using AccessTypeE = + cutlass::Array::value>; + using IteratorE = + cutlass::transform::threadblock::PredicatedTileAccessIterator< + cutlass::MatrixShape, + ElementE, LayoutE, 1, ThreadMapE, AccessTypeE>; + + // Define the threadblock-scoped multistage matrix multiply + using ThreadblockMma = cutlass::gemm::threadblock::SparseMmaMultistage< + typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, + MmaCore::kCacheOpA, IteratorB, typename MmaCore::SmemIteratorB, + MmaCore::kCacheOpB, ElementAccumulator, layout::RowMajor, + IteratorE, typename MmaCore::SmemIteratorE, MmaCore::kCacheOpE, + typename MmaCore::MmaPolicy, Stages>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_trmm.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_trmm.h new file mode 100644 index 0000000000000000000000000000000000000000..8c13d17725dab931b257cab6bc75141738ccef24 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_trmm.h @@ -0,0 +1,445 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
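// In DefaultSparseMma above, kSparse comes from the sparse MmaCore (2 for the
// 2:4 structured-sparse tensor-core path), so the A-operand tile is only
// kK / kSparse elements wide and IteratorE supplies the sparsity metadata.
// The CacheOpA / CacheOpB choice follows a single rule visible in the code:
// only a full 128-bit access per thread may use the streaming (cache-global)
// cp.async policy; anything narrower falls back to the default policy.  A
// stand-alone restatement of that rule (illustration only, not a CUTLASS API):
enum class CacheOperation { Always, Global };

constexpr CacheOperation pick_cache_op(int element_bits, int alignment_elements) {
  return (element_bits * alignment_elements == 128) ? CacheOperation::Global
                                                    : CacheOperation::Always;
}

static_assert(pick_cache_op(16, 8) == CacheOperation::Global,
              "half_t with 8-element alignment forms 128-bit accesses");
static_assert(pick_cache_op(16, 4) == CacheOperation::Always,
              "narrower accesses keep the default caching policy");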
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +// +/*! \file + \brief Template for a pipelined GEMM kernel. Does not compute batching or support split-K. +*/ + +#pragma once + +#include "cutlass/blas3.h" +#include "cutlass/arch/arch.h" +#include "cutlass/arch/wmma.h" + +#include "cutlass/layout/matrix.h" +#include "cutlass/transform/threadblock/predicated_tile_iterator_triangular_matrix.h" +#include "cutlass/gemm/threadblock/mma_blas3_multistage.h" +#include "cutlass/transform/threadblock/predicated_tile_iterator.h" +#include "cutlass/transform/threadblock/predicated_tile_iterator_2dthreadtile.h" +#include "cutlass/gemm/threadblock/default_mma_core_simt.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm70.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm75.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm80.h" + +#if defined(CUTLASS_ARCH_WMMA_ENABLED) +#include "cutlass/gemm/threadblock/default_mma_core_wmma.h" +#endif //CUTLASS_ARCH_WMMA_ENABLED + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +template < + /// Element type for A matrix operand + typename ElementA_, + /// Layout type for A matrix operand + typename LayoutA_, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB_, + /// Layout type for B matrix operand + typename LayoutB_, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Side Mode for the kernel + SideMode kSideMode, + /// Fill Mode for the triangular matrix + FillMode kFillMode, + /// Diag Type for the triangular matrix + DiagType kDiagType, + /// Element type for internal accumulation + typename ElementAccumulator_, + /// Layout type for C and D matrix operands + typename LayoutC_, + /// Operator class tag + typename OperatorClass_, + /// Tag indicating architecture to tune for + typename ArchTag_, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape_, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape_, + /// Instruction-level tile size (concept: 
GemmShape) + typename InstructionShape_, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Operation perfomed by GEMM + typename Operator, + /// Store the accumulators in row major or column major. Row major is used + /// when output layout is interleaved. + bool AccumulatorsInRowMajor = false + > +struct DefaultTrmm; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization for row-major output (OperatorClass TensorOp) +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Side Mode for the kernel + SideMode kSideMode, + /// Fill Mode for the triangular matrix + FillMode kFillMode, + /// Diag Type for the triangular matrix + DiagType kDiagType, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape, + /// Number of stages used in the multistage mainloop + int Stages, + /// Operation perfomed by GEMM + typename Operator + > +struct DefaultTrmm { + + static cutlass::arch::CacheOperation::Kind const CacheOpA = + ((sizeof_bits::value * kAlignmentA) == 128) + ? cutlass::arch::CacheOperation::Global + : cutlass::arch::CacheOperation::Always; + + static cutlass::arch::CacheOperation::Kind const CacheOpB = + ((sizeof_bits::value * kAlignmentB) == 128) + ? 
cutlass::arch::CacheOperation::Global + : cutlass::arch::CacheOperation::Always; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, + Stages, Operator, false, CacheOpA, CacheOpB>; + + // Define iterators over tiles from the A operand + using ThreadMapA = typename MmaCore::IteratorThreadMapA; + using AccessTypeA = cutlass::Array; + + using IteratorA = + cutlass::transform::threadblock::PredicatedTileAccessIteratorTriangularMatrix< + cutlass::MatrixShape, + ElementA, LayoutA, 1, ThreadMapA, kSideMode, kFillMode, kDiagType, AccessTypeA>; + + // Define iterators over tiles from the B operand + using ThreadMapB = typename MmaCore::IteratorThreadMapB; + using AccessTypeB = cutlass::Array; + + using IteratorB = + cutlass::transform::threadblock::PredicatedTileAccessIteratorTriangularMatrix< + cutlass::MatrixShape, + ElementB, LayoutB, 0, ThreadMapB, kSideMode, FillMode::kFull, DiagType::kInvalid, AccessTypeB>; + + // Define the threadblock-scoped multistage matrix multiply + using ThreadblockMma = cutlass::gemm::threadblock::MmaMultistage< + typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, + MmaCore::kCacheOpA, IteratorB, typename MmaCore::SmemIteratorB, + MmaCore::kCacheOpB, ElementAccumulator, layout::RowMajor, + typename MmaCore::MmaPolicy, Stages, SharedMemoryClearOption::kZfill>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization for row-major output, right side mode (OperatorClass TensorOp) +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Fill Mode for the triangular matrix + FillMode kFillMode, + /// Diag Type for the triangular matrix + DiagType kDiagType, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape, + /// Number of stages used in the multistage mainloop + int Stages, + /// Operation perfomed by GEMM + typename Operator + > +struct DefaultTrmm { + + static cutlass::arch::CacheOperation::Kind const CacheOpA = + ((sizeof_bits::value * kAlignmentA) == 128) + ? cutlass::arch::CacheOperation::Global + : cutlass::arch::CacheOperation::Always; + + static cutlass::arch::CacheOperation::Kind const CacheOpB = + ((sizeof_bits::value * kAlignmentB) == 128) + ? 
cutlass::arch::CacheOperation::Global + : cutlass::arch::CacheOperation::Always; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, + Stages, Operator, false, CacheOpA, CacheOpB>; + + // Define iterators over tiles from the A operand + using ThreadMapA = typename MmaCore::IteratorThreadMapA; + using AccessTypeA = cutlass::Array; + + using IteratorA = + cutlass::transform::threadblock::PredicatedTileAccessIteratorTriangularMatrix< + cutlass::MatrixShape, + ElementA, LayoutA, 1, ThreadMapA, SideMode::kRight, FillMode::kFull, DiagType::kInvalid, AccessTypeA>; + + // Define iterators over tiles from the B operand + using ThreadMapB = typename MmaCore::IteratorThreadMapB; + using AccessTypeB = cutlass::Array; + + using IteratorB = + cutlass::transform::threadblock::PredicatedTileAccessIteratorTriangularMatrix< + cutlass::MatrixShape, + ElementB, LayoutB, 0, ThreadMapB, SideMode::kRight, kFillMode, kDiagType, AccessTypeB>; + + // Define the threadblock-scoped multistage matrix multiply + using ThreadblockMma = cutlass::gemm::threadblock::MmaMultistage< + typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, + MmaCore::kCacheOpA, IteratorB, typename MmaCore::SmemIteratorB, + MmaCore::kCacheOpB, ElementAccumulator, layout::RowMajor, + typename MmaCore::MmaPolicy, Stages, SharedMemoryClearOption::kZfill>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization for row-major output with unit diagonal (OperatorClass TensorOp) +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Side Mode for the kernel + SideMode kSideMode, + /// Fill Mode for the triangular matrix + FillMode kFillMode, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape, + /// Number of stages used in the multistage mainloop + int Stages, + /// Operation perfomed by GEMM + typename Operator + > +struct DefaultTrmm { + + static cutlass::arch::CacheOperation::Kind const CacheOpA = + ((sizeof_bits::value * kAlignmentA) == 128) + ? cutlass::arch::CacheOperation::Global + : cutlass::arch::CacheOperation::Always; + + static cutlass::arch::CacheOperation::Kind const CacheOpB = + ((sizeof_bits::value * kAlignmentB) == 128) + ? 
cutlass::arch::CacheOperation::Global + : cutlass::arch::CacheOperation::Always; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, + Stages, Operator, false, CacheOpA, CacheOpB>; + + // Define iterators over tiles from the A operand + using ThreadMapA = typename MmaCore::IteratorThreadMapA; + using AccessTypeA = cutlass::Array; + + using IteratorA = + cutlass::transform::threadblock::PredicatedTileAccessIteratorTriangularMatrix< + cutlass::MatrixShape, + ElementA, LayoutA, 1, ThreadMapA, kSideMode, kFillMode, DiagType::kUnit, AccessTypeA>; + + // Define iterators over tiles from the B operand + using ThreadMapB = typename MmaCore::IteratorThreadMapB; + using AccessTypeB = cutlass::Array; + + using IteratorB = + cutlass::transform::threadblock::PredicatedTileAccessIteratorTriangularMatrix< + cutlass::MatrixShape, + ElementB, LayoutB, 0, ThreadMapB, kSideMode, FillMode::kFull, DiagType::kInvalid, AccessTypeB>; + + // Define the threadblock-scoped multistage matrix multiply + using ThreadblockMma = cutlass::gemm::threadblock::MmaBlas3Multistage< + typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, + MmaCore::kCacheOpA, IteratorB, typename MmaCore::SmemIteratorB, + MmaCore::kCacheOpB, ElementAccumulator, layout::RowMajor, + typename MmaCore::MmaPolicy, Stages, SharedMemoryClearOption::kZfill>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization for row-major output, right side mode, unit diagonal (OperatorClass TensorOp) +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Fill Mode for the triangular matrix + FillMode kFillMode, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape, + /// Number of stages used in the multistage mainloop + int Stages, + /// Operation perfomed by GEMM + typename Operator + > +struct DefaultTrmm { + + static cutlass::arch::CacheOperation::Kind const CacheOpA = + ((sizeof_bits::value * kAlignmentA) == 128) + ? cutlass::arch::CacheOperation::Global + : cutlass::arch::CacheOperation::Always; + + static cutlass::arch::CacheOperation::Kind const CacheOpB = + ((sizeof_bits::value * kAlignmentB) == 128) + ? 
cutlass::arch::CacheOperation::Global + : cutlass::arch::CacheOperation::Always; + + // Define the MmaCore components + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, + ElementB, LayoutB, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, + Stages, Operator, false, CacheOpA, CacheOpB>; + + // Define iterators over tiles from the A operand + using ThreadMapA = typename MmaCore::IteratorThreadMapA; + using AccessTypeA = cutlass::Array; + + using IteratorA = + cutlass::transform::threadblock::PredicatedTileAccessIteratorTriangularMatrix< + cutlass::MatrixShape, + ElementA, LayoutA, 1, ThreadMapA, SideMode::kRight, FillMode::kFull, DiagType::kInvalid, AccessTypeA>; + + // Define iterators over tiles from the B operand + using ThreadMapB = typename MmaCore::IteratorThreadMapB; + using AccessTypeB = cutlass::Array; + + using IteratorB = + cutlass::transform::threadblock::PredicatedTileAccessIteratorTriangularMatrix< + cutlass::MatrixShape, + ElementB, LayoutB, 0, ThreadMapB, SideMode::kRight, kFillMode, DiagType::kUnit, AccessTypeB>; + + // Define the threadblock-scoped multistage matrix multiply + using ThreadblockMma = cutlass::gemm::threadblock::MmaBlas3Multistage< + typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, + MmaCore::kCacheOpA, IteratorB, typename MmaCore::SmemIteratorB, + MmaCore::kCacheOpB, ElementAccumulator, layout::RowMajor, + typename MmaCore::MmaPolicy, Stages, SharedMemoryClearOption::kZfill>; +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/ell_mma_multistage.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/ell_mma_multistage.h new file mode 100644 index 0000000000000000000000000000000000000000..fa0945a441f83cc051569de62387d4822053c7eb --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/ell_mma_multistage.h @@ -0,0 +1,648 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
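// All four DefaultTrmm specializations above share the same skeleton: the
// partial specialization on <SideMode, DiagType> only decides which operand
// gets the triangular-matrix iterator and whether the unit-diagonal Blas3
// mainloop (MmaBlas3Multistage) is used instead of the plain MmaMultistage.
// A reduced analog of that dispatch, with stand-in type names (illustration
// only, not the real CUTLASS types):
enum class Side { kLeft, kRight };
enum class Diag { kNonUnit, kUnit };

struct TriangularIter {};   // stand-in for the predicated triangular iterator
struct FullIter {};         // stand-in for the ordinary full-matrix iterator

template <Side S, Diag D>
struct DefaultTrmmLike {                      // left side: A is triangular
  using IteratorA = TriangularIter;
  using IteratorB = FullIter;
  static constexpr bool kUsesBlas3Mainloop = (D == Diag::kUnit);
};

template <Diag D>
struct DefaultTrmmLike<Side::kRight, D> {     // right side: B is triangular
  using IteratorA = FullIter;
  using IteratorB = TriangularIter;
  static constexpr bool kUsesBlas3Mainloop = (D == Diag::kUnit);
};

static_assert(DefaultTrmmLike<Side::kRight, Diag::kUnit>::kUsesBlas3Mainloop,
              "unit-diagonal cases route to the Blas3 mainloop");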
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Template for a multistage threadblock-scoped Blocked-Ell MMA. +*/ + +#pragma once + + +#include "cutlass/aligned_buffer.h" +#include "cutlass/arch/memory.h" +#include "cutlass/array.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_shape.h" +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/threadblock/mma_base.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace threadblock { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Structure to compute the matrix product targeting CUDA cores and SIMT math +/// instructions. +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + /// Iterates over tiles of A operand in global memory + // (concept: ReadableTileIterator | ForwardTileIterator | + // MaskedTileIterator) + typename IteratorA_, + /// Iterates over tiles of A operand in shared memory + /// (concept: WriteableTileIterator | RandomAccessTileIterator) + typename SmemIteratorA_, + /// Cache operation for operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Iterates over tiles of B operand in global memory + // (concept: ReadableTileIterator | ForwardTileIterator | + // MaskedTileIterator) + typename IteratorB_, + /// Iterates over tiles of B operand in shared memory + /// (concept: WriteableTileIterator | RandomAccessTileIterator) + typename SmemIteratorB_, + /// Cache operation for operand B + cutlass::arch::CacheOperation::Kind CacheOpB, + /// Data type of accumulator matrix + typename ElementC_, + /// Data type of accumulator matrix + typename LayoutC_, + /// Policy describing tuning details (concept: MmaPolicy) + typename Policy_, + /// Number of stages, + int Stages, + /// Used for partial specialization + typename Enable = bool> +class EllMmaMultistage : + public MmaBase { +public: + ///< Base class + using Base = MmaBase; + ///< Size of the Gemm problem - concept: gemm::GemmShape<> + using Shape = Shape_; + ///< Iterates over tiles of A operand in global memory + using IteratorA = IteratorA_; + ///< Iterates over tiles of B operand in global memory + using IteratorB = IteratorB_; + ///< Data type of accumulator matrix + using ElementC = ElementC_; + ///< Layout of accumulator matrix + using LayoutC = LayoutC_; + ///< Policy describing tuning details + using Policy = Policy_; + + using SmemIteratorA = SmemIteratorA_; + using SmemIteratorB = SmemIteratorB_; + + static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; + + using EllIterator = typename cutlass::transform::threadblock::ell::Iterator; + + // + // Dependent types + // + + /// Fragment of accumulator tile + using FragmentC = 
typename Policy::Operator::FragmentC; + + /// Warp-level Mma + using Operator = typename Policy::Operator; + + /// Minimum architecture is Sm80 to support cp.async + using ArchTag = arch::Sm80; + + /// Complex transform on A operand + static ComplexTransform const kTransformA = Operator::kTransformA; + + /// Complex transform on B operand + static ComplexTransform const kTransformB = Operator::kTransformB; + + /// Internal structure exposed for introspection. + struct Detail { + + static_assert(Base::kWarpGemmIterations > 1, + "The pipelined structure requires at least two warp-level " + "GEMM operations."); + + /// Number of cp.async instructions to load one stage of operand A + static int const AsyncCopyIterationsPerStageA = + IteratorA::ThreadMap::Iterations::kCount; + + /// Number of cp.async instructions to load one stage of operand B + static int const AsyncCopyIterationsPerStageB = + IteratorB::ThreadMap::Iterations::kCount; + + /// Number of stages + static int const kStages = Stages; + + /// Number of cp.async instructions to load on group of operand A + static int const kAccessesPerGroupA = + (AsyncCopyIterationsPerStageA + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations; + + /// Number of cp.async instructions to load on group of operand B + static int const kAccessesPerGroupB = + (AsyncCopyIterationsPerStageB + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations; + }; + + private: + + using WarpLoadedFragmentA = typename Operator::FragmentA; + using WarpLoadedFragmentB = typename Operator::FragmentB; + using WarpTransformedFragmentA = typename Operator::TransformedFragmentA; + using WarpTransformedFragmentB = typename Operator::TransformedFragmentB; + + private: + + // + // Data members + // + + /// Iterator to write threadblock-scoped tile of A operand to shared memory + SmemIteratorA smem_iterator_A_; + + /// Iterator to write threadblock-scoped tile of B operand to shared memory + SmemIteratorB smem_iterator_B_; + +public: + + /// Construct from tensor references + CUTLASS_DEVICE + EllMmaMultistage( + ///< Shared storage needed for internal use by threadblock-scoped GEMM + typename Base::SharedStorage &shared_storage, + ///< ID within the threadblock + int thread_idx, + ///< ID of warp + int warp_idx, + ///< ID of each thread within a warp + int lane_idx + ): + Base(shared_storage, thread_idx, warp_idx, lane_idx), + smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx), + smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx) + { + // Compute warp location within threadblock tile by mapping the warp_id to + // three coordinates: + // _m: the warp's position within the threadblock along the M dimension + // _n: the warp's position within the threadblock along the N dimension + // _k: the warp's position within the threadblock along the K dimension + + int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN); + int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN); + + int warp_idx_m = warp_idx_mn % Base::WarpCount::kM; + int warp_idx_n = warp_idx_mn / Base::WarpCount::kM; + + // Add per-warp offsets in units of warp-level tiles + this->warp_tile_iterator_A_.add_tile_offset( + {warp_idx_m, Base::kWarpGemmIterations * warp_idx_k}); + this->warp_tile_iterator_B_.add_tile_offset( + {Base::kWarpGemmIterations * warp_idx_k, warp_idx_n}); + } + + template + CUTLASS_DEVICE + void copy_tiles_and_advance(IteratorA &iterator_A, IteratorB &iterator_B, EllIterator &ell_iter, + int group_start_A = 0, int group_start_B = 0) { 
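    // Descriptive summary of this helper: group_start_A / group_start_B select
    // which slice of the per-stage cp.async iterations to issue, where each
    // group holds roughly AsyncCopyIterationsPerStage / kWarpGemmIterations
    // copies (see Detail above), so the mainloop can interleave these
    // global->shared copies with its warp-level MMAs.  For the dense operand
    // (A when the Blocked-ELL sparse matrix is B, and B when it is A) the
    // global pointer is additionally displaced by the ELL column offset, and
    // the copy is predicated on that offset being valid; cp_async_zfill writes
    // zeros into shared memory whenever the predicate is false.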
+ iterator_A.set_iteration_index(group_start_A * + IteratorA::kAccessesPerVector); + this->smem_iterator_A_.set_iteration_index(group_start_A); + + // Async Copy for operand A + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::kAccessesPerGroupA; ++j) { + if (group_start_A + j < Detail::AsyncCopyIterationsPerStageA) { + typename IteratorA::AccessType *dst_ptr = + reinterpret_cast( + this->smem_iterator_A_.get()); + + int const kSrcBytes = sizeof_bits::value * + IteratorA::ThreadMap::kElementsPerAccess / + IteratorA::kAccessesPerVector / 8; + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) { + auto gmem_ptr = iterator_A.get(); + bool is_valid = iterator_A.valid(); + + if (!is_A_sparse){ + if (is_offset_constant){ + auto ell_offset = ell_iter.get_offset_fast(); + is_valid = is_valid && (ell_offset >= 0); + gmem_ptr += ell_offset * sizeof(IteratorA::Element) / kSrcBytes; + } else { + int k_offset = iterator_A.get_k(); + auto ell_offset = ell_iter.get_offset(k_offset); + is_valid = is_valid && (ell_offset >= 0); + gmem_ptr += (ell_offset * sizeof(IteratorA::Element)) / kSrcBytes; + } + } + + cutlass::arch::cp_async_zfill( + dst_ptr + v, gmem_ptr, is_valid); + + ++iterator_A; + } + + ++this->smem_iterator_A_; + } + } + + iterator_B.set_iteration_index(group_start_B * + IteratorB::kAccessesPerVector); + this->smem_iterator_B_.set_iteration_index(group_start_B); + + // Async Copy for operand B + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) { + if (group_start_B + j < Detail::AsyncCopyIterationsPerStageB) { + typename IteratorB::AccessType *dst_ptr = + reinterpret_cast( + this->smem_iterator_B_.get()); + + int const kSrcBytes = sizeof_bits::value * + IteratorB::ThreadMap::kElementsPerAccess / + IteratorB::kAccessesPerVector / 8; + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) { + auto gmem_ptr = iterator_B.get(); + bool is_valid = iterator_B.valid(); + + if (is_A_sparse){ + if (is_offset_constant){ + auto ell_offset = ell_iter.get_offset_fast(); + is_valid = is_valid && (ell_offset >= 0); + gmem_ptr += ell_offset * sizeof(IteratorB::Element) / kSrcBytes; + } else { + int k_offset = iterator_B.get_k(); + auto ell_offset = ell_iter.get_offset(k_offset); + is_valid = is_valid && (ell_offset >= 0); + gmem_ptr += ( ell_offset * sizeof(IteratorB::Element)) / kSrcBytes; + } + } + + cutlass::arch::cp_async_zfill( + dst_ptr + v, gmem_ptr, is_valid); + + ++iterator_B; + } + ++this->smem_iterator_B_; + } + } + } + + + /// Perform a threadblock-scoped matrix multiply-accumulate + template + CUTLASS_DEVICE + void operator()( + ///< problem size of GEMM + int gemm_k_iterations, + ///< destination accumulator tile + FragmentC &accum, + ///< iterator over A operand in global memory + IteratorA iterator_A, + ///< iterator over B operand in global memory + IteratorB iterator_B, + ///< initial value of accumulator + FragmentC const &src_accum, + EllIterator &ell_iterator + ) { + // + // Prologue + // + + // Issue several complete stages + CUTLASS_PRAGMA_UNROLL + for (int stage = 0; stage < Base::kStages - 1; + ++stage, --gemm_k_iterations) { + + iterator_A.clear_mask(gemm_k_iterations == 0); + iterator_B.clear_mask(gemm_k_iterations == 0); + + iterator_A.set_iteration_index(0); + this->smem_iterator_A_.set_iteration_index(0); + + // Async Copy for operand A + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) { + typename IteratorA::AccessType *dst_ptr = + reinterpret_cast( + 
this->smem_iterator_A_.get()); + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) { + int const kSrcBytes = + sizeof_bits::value * + IteratorA::ThreadMap::kElementsPerAccess / + IteratorA::kAccessesPerVector / 8; + + auto gmem_ptr = iterator_A.get(); + bool is_valid = iterator_A.valid(); + + if (!is_A_sparse){ + if (is_offset_constant){ + auto ell_offset = ell_iterator.get_offset_fast(); + is_valid = is_valid && (ell_offset >= 0); + gmem_ptr += ell_offset * sizeof(IteratorA::Element) / kSrcBytes; + } else { + int k_offset = iterator_A.get_k(); + auto ell_offset = ell_iterator.get_offset(k_offset); + is_valid = is_valid && (ell_offset >= 0); + gmem_ptr += (ell_offset * sizeof(IteratorA::Element)) / kSrcBytes; + } + } + + cutlass::arch::cp_async_zfill( + dst_ptr + v, gmem_ptr, is_valid); + + ++iterator_A; + } + + ++this->smem_iterator_A_; + } + + iterator_B.set_iteration_index(0); + this->smem_iterator_B_.set_iteration_index(0); + + // Async Copy for operand B + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) { + typename IteratorB::AccessType *dst_ptr = + reinterpret_cast( + this->smem_iterator_B_.get()); + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) { + int const kSrcBytes = + sizeof_bits::value * + IteratorB::ThreadMap::kElementsPerAccess / + IteratorB::kAccessesPerVector / 8; + + auto gmem_ptr = iterator_B.get(); + bool is_valid = iterator_B.valid(); + + if (is_A_sparse){ + if (is_offset_constant){ + auto ell_offset = ell_iterator.get_offset_fast(); + is_valid = is_valid && (ell_offset >= 0); + gmem_ptr += ell_offset * sizeof(IteratorB::Element) / kSrcBytes; + } else { + int k_offset = iterator_B.get_k(); + auto ell_offset = ell_iterator.get_offset(k_offset); + is_valid = is_valid && (ell_offset >= 0); + gmem_ptr += ( ell_offset * sizeof(IteratorB::Element)) / kSrcBytes; + } + } + + cutlass::arch::cp_async_zfill( + dst_ptr + v, gmem_ptr, is_valid); + + ++iterator_B; + } + + ++this->smem_iterator_B_; + } + + // Move to the next stage + iterator_A.add_tile_offset({0, 1}); + iterator_B.add_tile_offset({1, 0}); + ++ell_iterator; + + this->smem_iterator_A_.add_tile_offset({0, 1}); + this->smem_iterator_B_.add_tile_offset({1, 0}); + + // Defines the boundary of a stage of cp.async. + cutlass::arch::cp_async_fence(); + } + + // Perform accumulation in the 'd' output operand + accum = src_accum; + + // Waits until kStages-2 stages have committed. 
+ cutlass::arch::cp_async_wait(); + __syncthreads(); + + // Pair of fragments used to overlap shared memory loads and math + // instructions + WarpLoadedFragmentA warp_loaded_frag_A[2]; + WarpLoadedFragmentB warp_loaded_frag_B[2]; + WarpTransformedFragmentA warp_transformed_frag_A[2]; + WarpTransformedFragmentB warp_transformed_frag_B[2]; + + Operator warp_mma; + + this->warp_tile_iterator_A_.set_kgroup_index(0); + this->warp_tile_iterator_B_.set_kgroup_index(0); + + this->warp_tile_iterator_A_.load(warp_loaded_frag_A[0]); + this->warp_tile_iterator_B_.load(warp_loaded_frag_B[0]); + + ++this->warp_tile_iterator_A_; + ++this->warp_tile_iterator_B_; + + iterator_A.clear_mask(gemm_k_iterations == 0); + iterator_B.clear_mask(gemm_k_iterations == 0); + + if (is_A_sparse){ + iterator_A.ell_add_mask(ell_iterator.get_blocksize()); + } + else { + iterator_B.ell_add_mask(ell_iterator.get_blocksize()); + } + + int smem_write_stage_idx = Base::kStages - 1; + int smem_read_stage_idx = 0; + + warp_mma.transform(warp_transformed_frag_A[0], warp_transformed_frag_B[0], + warp_loaded_frag_A[0], warp_loaded_frag_B[0]); + + // tf32x3 kernels use staging accumulation. warp_mma uses a temporary + // accumulator and this temporary accumulator is added to the final + // accumulator once in every mainloop iteration. + plus plus_accum; + + FragmentC tmp_accum; + + if (platform::is_same::value + || platform::is_same::value) { + + tmp_accum.clear(); + } + + // + // Mainloop + // + + CUTLASS_GEMM_LOOP + for (; gemm_k_iterations > (-Base::kStages + 1);) { + // + // Loop over GEMM K dimension + // + + // Computes a warp-level GEMM on data held in shared memory + // Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate + CUTLASS_PRAGMA_UNROLL + for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; + ++warp_mma_k) { + + // Load warp-level tiles from shared memory, wrapping to k offset if + // this is the last group as the case may be. 
+ + this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); + this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); + + this->warp_tile_iterator_A_.load(warp_loaded_frag_A[(warp_mma_k + 1) % 2]); + this->warp_tile_iterator_B_.load(warp_loaded_frag_B[(warp_mma_k + 1) % 2]); + + ++this->warp_tile_iterator_A_; + ++this->warp_tile_iterator_B_; + + if (warp_mma_k > 0) + warp_mma.transform(warp_transformed_frag_A[warp_mma_k % 2], + warp_transformed_frag_B[warp_mma_k % 2], + warp_loaded_frag_A[warp_mma_k % 2], + warp_loaded_frag_B[warp_mma_k % 2]); + + if (platform::is_same::value + || platform::is_same::value) { + + warp_mma( + tmp_accum, + warp_transformed_frag_A[warp_mma_k % 2], + warp_transformed_frag_B[warp_mma_k % 2], + tmp_accum + ); + + if (warp_mma_k == 0) { + accum = plus_accum(accum, tmp_accum); + tmp_accum.clear(); + } + } else { + warp_mma( + accum, + warp_transformed_frag_A[warp_mma_k % 2], + warp_transformed_frag_B[warp_mma_k % 2], + accum + ); + } + + // Issue global->shared copies for the this stage + if (warp_mma_k < Base::kWarpGemmIterations - 1) { + int group_start_iteration_A, group_start_iteration_B; + + group_start_iteration_A = warp_mma_k * Detail::kAccessesPerGroupA; + group_start_iteration_B = warp_mma_k * Detail::kAccessesPerGroupB; + + copy_tiles_and_advance( + iterator_A, iterator_B, ell_iterator, group_start_iteration_A, + group_start_iteration_B); + } + + if (warp_mma_k + 2 == Base::kWarpGemmIterations) { + int group_start_iteration_A, group_start_iteration_B; + group_start_iteration_A = + (warp_mma_k + 1) * Detail::kAccessesPerGroupA; + group_start_iteration_B = + (warp_mma_k + 1) * Detail::kAccessesPerGroupB; + + copy_tiles_and_advance( + iterator_A, iterator_B, ell_iterator, group_start_iteration_A, + group_start_iteration_B); + + // Inserts a memory fence between stages of cp.async instructions. + cutlass::arch::cp_async_fence(); + + // Waits until kStages-2 stages have committed. 
+ arch::cp_async_wait(); + __syncthreads(); + + // Move to the next stage + iterator_A.add_tile_offset({0, 1}); + iterator_B.add_tile_offset({1, 0}); + ++ell_iterator; + + this->smem_iterator_A_.add_tile_offset({0, 1}); + this->smem_iterator_B_.add_tile_offset({1, 0}); + + // Add negative offsets to return iterators to the 'start' of the + // circular buffer in shared memory + if (smem_write_stage_idx == (Base::kStages - 1)) { + this->smem_iterator_A_.add_tile_offset({0, -Base::kStages}); + this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0}); + smem_write_stage_idx = 0; + } else { + ++smem_write_stage_idx; + } + + if (smem_read_stage_idx == (Base::kStages - 1)) { + this->warp_tile_iterator_A_.add_tile_offset( + {0, -Base::kStages * Policy::kPartitionsK * + Base::kWarpGemmIterations}); + this->warp_tile_iterator_B_.add_tile_offset( + {-Base::kStages * Policy::kPartitionsK * + Base::kWarpGemmIterations, + 0}); + smem_read_stage_idx = 0; + } else { + ++smem_read_stage_idx; + } + + --gemm_k_iterations; + iterator_A.clear_mask(gemm_k_iterations == 0); + iterator_B.clear_mask(gemm_k_iterations == 0); + } + + // Do any conversions feeding the first stage at the end of the loop so + // we can start right away on mma instructions + if (warp_mma_k + 1 == Base::kWarpGemmIterations) + warp_mma.transform(warp_transformed_frag_A[(warp_mma_k + 1) % 2], + warp_transformed_frag_B[(warp_mma_k + 1) % 2], + warp_loaded_frag_A[(warp_mma_k + 1) % 2], + warp_loaded_frag_B[(warp_mma_k + 1) % 2]); + } + + } + + if (platform::is_same::value + || platform::is_same::value) { + accum = plus_accum(accum, tmp_accum); + } + + + // Commit and drain all pending and predicated cp.async pnz from the GEMM mainloop + cutlass::arch::cp_async_fence(); + cutlass::arch::cp_async_wait<0>(); + __syncthreads(); + + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/ell_mma_pipelined.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/ell_mma_pipelined.h new file mode 100644 index 0000000000000000000000000000000000000000..8b1c2c4378194fc27c35cb6322d847677a64de83 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/ell_mma_pipelined.h @@ -0,0 +1,376 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
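// The EllMmaMultistage mainloop above is the usual cp.async software pipeline:
// the prologue issues kStages - 1 complete stages of copies, cp_async_wait
// keeps kStages - 2 stages in flight behind each fence, and every mainloop
// iteration runs the warp-level MMAs on the oldest resident stage while
// cp.async refills the circular shared-memory slot that will be consumed
// kStages - 1 iterations later (this is what smem_read_stage_idx /
// smem_write_stage_idx track).  A small stand-alone sketch of that schedule
// (illustration only, not CUTLASS code):
#include <cstdio>

void print_schedule(int stages, int k_tiles) {
  for (int s = 0; s < stages - 1; ++s)
    std::printf("prologue: cp.async into stage %d\n", s);
  for (int k = 0; k < k_tiles; ++k) {
    int read_stage  = k % stages;                   // stage consumed by the MMAs
    int write_stage = (k + stages - 1) % stages;    // stage refilled by cp.async
    std::printf("iter %d: mma on stage %d, cp.async into stage %d\n",
                k, read_stage, write_stage);
  }
}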
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Template for a double-buffered threadblock-scoped Blocked-Ell MMA. +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/array.h" +#include "cutlass/aligned_buffer.h" +#include "cutlass/numeric_conversion.h" + +#include "cutlass/numeric_types.h" +#include "cutlass/matrix_shape.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/threadblock/mma_base.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace threadblock { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions. +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + /// Iterates over tiles of A operand in global memory + // (concept: ReadableTileIterator | ForwardTileIterator | MaskedTileIterator) + typename IteratorA_, + /// Iterates over tiles of A operand in shared memory + /// (concept: WriteableTileIterator | RandomAccessTileIterator) + typename SmemIteratorA_, + /// Iterates over tiles of B operand in global memory + // (concept: ReadableTileIterator | ForwardTileIterator | MaskedTileIterator) + typename IteratorB_, + /// Iterates over tiles of B operand in shared memory + /// (concept: WriteableTileIterator | RandomAccessTileIterator) + typename SmemIteratorB_, + /// Data type of accumulator matrix + typename ElementC_, + /// Data type of accumulator matrix + typename LayoutC_, + /// Policy describing tuning details (concept: MmaPolicy) + typename Policy_, + /// Transformation applied to A operand + typename TransformA_ = NumericArrayConverter< + typename SmemIteratorA_::Element, + typename IteratorA_::Element, + IteratorA_::Fragment::kElements>, + /// + /// Transformation applied to B operand + typename TransformB_ = NumericArrayConverter< + typename SmemIteratorB_::Element, + typename IteratorB_::Element, + IteratorB_::Fragment::kElements>, + /// Used for partial specialization + typename Enable = bool +> +class EllMmaPipelined : public MmaBase { +public: + + ///< Base class + using Base = MmaBase; + + using Shape = Shape_; ///< Size of the Gemm problem - concept: gemm::GemmShape<> + using IteratorA = IteratorA_; ///< Iterates over tiles of A operand in global memory + using IteratorB = IteratorB_; ///< Iterates over tiles of B operand in global memory + using ElementC = ElementC_; ///< Data type of accumulator matrix + using LayoutC = LayoutC_; ///< Layout of accumulator matrix + using Policy = Policy_; ///< 
Policy describing tuning details + + using SmemIteratorA = SmemIteratorA_; + using SmemIteratorB = SmemIteratorB_; + + using TransformA = TransformA_; + using TransformB = TransformB_; + + // + // Dependent types + // + + /// Fragment of operand A loaded from global memory + using FragmentA = typename IteratorA::Fragment; + + /// Fragment of operand B loaded from global memory + using FragmentB = typename IteratorB::Fragment; + + /// Fragment of accumulator tile + using FragmentC = typename Policy::Operator::FragmentC; + + /// Warp-level Mma + using Operator = typename Policy::Operator; + + /// Obtain the arch tag from the warp-level operator + using ArchTag = typename Policy::Operator::ArchTag; + + /// Complex transform on A operand + static ComplexTransform const kTransformA = Operator::kTransformA; + + /// Complex transform on B operand + static ComplexTransform const kTransformB = Operator::kTransformB; + + // staticaly assert kStages for EllMmaPipelined is two (Double-buffered pipeline) + static_assert((Base::kStages==2), "EllMmaPipelined requires kStages set to value 2"); + +private: + + using WarpFragmentA = typename Operator::FragmentA; + using WarpFragmentB = typename Operator::FragmentB; + +protected: + + /// Iterator to write threadblock-scoped tile of A operand to shared memory + SmemIteratorA smem_iterator_A_; + + /// Iterator to write threadblock-scoped tile of B operand to shared memory + SmemIteratorB smem_iterator_B_; + + using EllIterator = typename cutlass::transform::threadblock::ell::Iterator; + +public: + /// Construct from tensor references + CUTLASS_DEVICE + EllMmaPipelined( + typename Base::SharedStorage &shared_storage, ///< Shared storage needed for internal use by threadblock-scoped GEMM + int thread_idx, ///< ID within the threadblock + int warp_idx, ///< ID of warp + int lane_idx ///< ID of each thread within a warp + ): + Base(shared_storage, thread_idx, warp_idx, lane_idx), + smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx), + smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx) { + + // Compute warp location within threadblock tile by mapping the warp_id to + // three coordinates: + // _m: the warp's position within the threadblock along the M dimension + // _n: the warp's position within the threadblock along the N dimension + // _k: the warp's position within the threadblock along the K dimension + + int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN); + int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN); + + int warp_idx_m = warp_idx_mn % Base::WarpCount::kM; + int warp_idx_n = warp_idx_mn / Base::WarpCount::kM; + + // Add per-warp offsets in units of warp-level tiles + this->warp_tile_iterator_A_.add_tile_offset({warp_idx_m, Base::kWarpGemmIterations * warp_idx_k}); + this->warp_tile_iterator_B_.add_tile_offset({Base::kWarpGemmIterations * warp_idx_k, warp_idx_n}); + + } + + /// Perform a threadblock-scoped matrix multiply-accumulate + template + CUTLASS_DEVICE + void operator()( + int gemm_k_iterations, ///< number of iterations of the mainloop + FragmentC &accum, ///< destination accumulator tile + IteratorA iterator_A, ///< iterator over A operand in global memory + IteratorB iterator_B, ///< iterator over B operand in global memory + FragmentC const &src_accum, ///< source accumulator tile + EllIterator &ell_iterator, + TransformA transform_A = TransformA(), ///< transformation applied to A fragment + TransformB transform_B = TransformB()) { ///< transformation applied to B fragment + + 
// + // Prologue + // + + // Perform accumulation in the 'd' output operand + accum = src_accum; + + FragmentA tb_frag_A; + FragmentB tb_frag_B; + + tb_frag_A.clear(); + tb_frag_B.clear(); + + // load sparse matrix + if (is_A_sparse){ + iterator_A.load(tb_frag_A); + } else { + iterator_B.load(tb_frag_B); + } + + // load dense matrix + if (is_offset_constant){ + if (is_A_sparse){ + iterator_B.load_with_ell_index_fast(tb_frag_B, ell_iterator); + } else { + iterator_A.load_with_ell_index_fast(tb_frag_A, ell_iterator); + } + } else { + if (is_A_sparse){ + iterator_B.load_with_ell_index(tb_frag_B, ell_iterator); + } else { + iterator_A.load_with_ell_index(tb_frag_A, ell_iterator); + } + } + + ++iterator_A; + ++iterator_B; + ++ell_iterator; + + this->smem_iterator_A_.store(transform_A(tb_frag_A)); + this->smem_iterator_B_.store(transform_B(tb_frag_B)); + + ++this->smem_iterator_A_; + ++this->smem_iterator_B_; + + __syncthreads(); + + // Pair of fragments used to overlap shared memory loads and math instructions + WarpFragmentA warp_frag_A[2]; + WarpFragmentB warp_frag_B[2]; + + this->warp_tile_iterator_A_.set_kgroup_index(0); + this->warp_tile_iterator_B_.set_kgroup_index(0); + + this->warp_tile_iterator_A_.load(warp_frag_A[0]); + this->warp_tile_iterator_B_.load(warp_frag_B[0]); + + ++this->warp_tile_iterator_A_; + ++this->warp_tile_iterator_B_; + + Operator warp_mma; + + int smem_write_stage_idx = 1; + + // Avoid reading out of bounds + iterator_A.clear_mask(gemm_k_iterations <= 1); + iterator_B.clear_mask(gemm_k_iterations <= 1); + + if (is_A_sparse){ + iterator_A.ell_add_mask(ell_iterator.get_blocksize()); + } + else { + iterator_B.ell_add_mask(ell_iterator.get_blocksize()); + } + + // Issue loads during the first warp-level matrix multiply-add *AFTER* issuing + // shared memory loads (which have the tightest latency requirement). + + // + // Mainloop + // + + // Note: The main loop does not support Base::kWarpGemmIterations == 2. + CUTLASS_GEMM_LOOP + for (; gemm_k_iterations > 0; --gemm_k_iterations) { + // + // Loop over GEMM K dimension + // + + CUTLASS_PRAGMA_UNROLL + for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) { + + // Load warp-level tiles from shared memory, wrapping to k offset if this is the last group + // as the case may be. 
+ + if (warp_mma_k == Base::kWarpGemmIterations - 1) { + + // Write fragments to shared memory + this->smem_iterator_A_.store(transform_A(tb_frag_A)); + + this->smem_iterator_B_.store(transform_B(tb_frag_B)); + + __syncthreads(); + + ++this->smem_iterator_A_; + ++this->smem_iterator_B_; + + // Add negative offsets to return iterators to the 'start' of the circular buffer in shared memory + if (smem_write_stage_idx == 1) { + this->smem_iterator_A_.add_tile_offset({0, -Base::kStages}); + this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0}); + } + else { + this->warp_tile_iterator_A_.add_tile_offset( + {0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations}); + this->warp_tile_iterator_B_.add_tile_offset( + {-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations, + 0}); + } + + smem_write_stage_idx ^= 1; + } + + this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); + this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); + + this->warp_tile_iterator_A_.load(warp_frag_A[(warp_mma_k + 1) % 2]); + this->warp_tile_iterator_B_.load(warp_frag_B[(warp_mma_k + 1) % 2]); + + ++this->warp_tile_iterator_A_; + ++this->warp_tile_iterator_B_; + + if (warp_mma_k == 0) { + // load sparse matrix + if (is_A_sparse){ + iterator_A.load(tb_frag_A); + } else { + iterator_B.load(tb_frag_B); + } + + // load dense matrix + if (is_offset_constant){ + if (is_A_sparse){ + iterator_B.load_with_ell_index_fast(tb_frag_B, ell_iterator); + } else { + iterator_A.load_with_ell_index_fast(tb_frag_A, ell_iterator); + } + } else { + if (is_A_sparse){ + iterator_B.load_with_ell_index(tb_frag_B, ell_iterator); + } else { + iterator_A.load_with_ell_index(tb_frag_A, ell_iterator); + } + } + + ++iterator_A; + ++iterator_B; + ++ell_iterator; + + // Avoid reading out of bounds if this was the last loop iteration + iterator_A.clear_mask(gemm_k_iterations <= 2); + iterator_B.clear_mask(gemm_k_iterations <= 2); + } + + warp_mma(accum, warp_frag_A[warp_mma_k % 2], + warp_frag_B[warp_mma_k % 2], accum); + } + } + + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/gemv.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/gemv.h new file mode 100644 index 0000000000000000000000000000000000000000..f0a4b1db500bb650ee92c15646d6d350e5d56984 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/gemv.h @@ -0,0 +1,147 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief Template for a threadblock-scoped GEMV kernel. +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/array.h" +#include "cutlass/numeric_types.h" +#include "cutlass/matrix_shape.h" + +#include "cutlass/gemm/gemm.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace threadblock { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Structure to compute the matrix-vector product using SIMT math instructions. 
+template < + class Core_ //< GemvCore +> +class Gemv { +public: + using Shape = typename Core_::Shape; + + /// The MMA operator that computes GEMV + using Operator = typename Core_::Operator; + + /// Iterates over A in global memory + using IteratorA = typename Core_::IteratorA; + + /// Iterates over B in global memory + using IteratorB = typename Core_::IteratorB; + + /// Fragment of operand C loaded from global memory + using IteratorC = typename Core_::IteratorC; + + /// Fragment of operand A loaded from global memory + using FragmentA = typename IteratorA::Fragment; + + /// Fragment of operand B loaded from global memory + using FragmentB = typename IteratorB::Fragment; + + /// Fragment of operand accumulator loaded/stored to global memory + using FragmentC = typename Operator::FragmentC; + + /// Shape of the per-thread GEMV operation + using ThreadShape = typename Core_::ThreadShape; + +public: + CUTLASS_DEVICE + Gemv() { } + + CUTLASS_DEVICE + void operator()( + GemmCoord const &problem_size, ///< problem size of batched GEMV + FragmentC &accum, ///< destination accumulator tile + IteratorA iterator_A, ///< iterator over A operand in global memory + IteratorB iterator_B, ///< iterator over B operand in global memory + FragmentC const &src_accum) { ///< source accumualtor tile + + // + // Prologue + // + + FragmentA frag_A; + FragmentB frag_B; + frag_A.clear(); + frag_B.clear(); + + iterator_A.load(frag_A); + iterator_B.load(frag_B); + ++iterator_A; + ++iterator_B; + + // + // Mainloop + // + Operator thread_mma; + int gemm_k = problem_size.k(); + + if (gemm_k < Shape::kK) + { + iterator_A.clear_mask(); + iterator_B.clear_mask(); + } + + // iterate over K to accumulate result + CUTLASS_GEMM_LOOP + for (; gemm_k > 0; gemm_k -= Shape::kK) { + thread_mma(accum, frag_A, frag_B, accum); + + iterator_A.load(frag_A); + iterator_B.load(frag_B); + ++iterator_A; + ++iterator_B; + + if (gemm_k < Shape::kK) + { + iterator_A.clear_mask(); + iterator_B.clear_mask(); + } + } + + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/index_remat.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/index_remat.h new file mode 100644 index 0000000000000000000000000000000000000000..1e245683256aa5b6c2fd3e75c003af3a52e6e0b6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/index_remat.h @@ -0,0 +1,107 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Helpers for rematerializing indices/dimensions in the thread hierarchy from special registers +*/ + +#pragma once + +#include "cutlass/cutlass.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace threadblock { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Helper to rematerialize block Idx. Reduces register liveness. +CUTLASS_DEVICE +int RematerializeThreadIdxX() { + return threadIdx.x; +} + +/// Helper to rematerialize block Idx. Reduces register liveness. +CUTLASS_DEVICE +int RematerializeThreadIdxY() { + return threadIdx.y; +} + +/// Helper to rematerialize block Idx. Reduces register liveness. +CUTLASS_DEVICE +int RematerializeThreadIdxZ() { + return threadIdx.z; +} + +/// Helper to rematerialize block Idx. Reduces register liveness. +CUTLASS_DEVICE +int RematerializeBlockIdxX() { + return blockIdx.x; +} + +/// Helper to rematerialize block Idx. Reduces register liveness. +CUTLASS_DEVICE +int RematerializeBlockIdxY() { + return blockIdx.y; +} + +/// Helper to rematerialize block Idx. Reduces register liveness. +CUTLASS_DEVICE +int RematerializeBlockIdxZ() { + return blockIdx.z; +} + +/// Helper to rematerialize block Dim. Reduces register liveness. +CUTLASS_DEVICE +int RematerializeBlockDimX() { + return blockDim.x; +} + +/// Helper to rematerialize block Dim. Reduces register liveness. +CUTLASS_DEVICE +int RematerializeBlockDimY() { + return blockDim.y; +} + +/// Helper to rematerialize block Dim. Reduces register liveness. 
+CUTLASS_DEVICE +int RematerializeBlockDimZ() { + return blockDim.z; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass + + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_base.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_base.h new file mode 100644 index 0000000000000000000000000000000000000000..524fdf93039ef1c73ae2a0a0278d970862468470 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_base.h @@ -0,0 +1,236 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Template for a double-buffered threadblock-scoped GEMM kernel. 
+*/ + +#pragma once + +#include "cutlass/tensor_ref.h" +#include "cutlass/aligned_buffer.h" +#include "cutlass/arch/memory.h" +#include "cutlass/array.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_shape.h" +#include "cutlass/numeric_types.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +/// Policy object describing MmaTensorOp +template < + /// Warp-level GEMM operator (concept: gemm::warp::Mma) + typename Operator_, + /// Padding used for A operand in shared memory (concept: MatrixShape) + typename SmemPaddingA_, + /// Padding used for B operand in shared memory (concept: MatrixShape) + typename SmemPaddingB_, + /// Number of partitions of K dimension of GEMM + int PartitionsK = 1> +struct MmaPolicy { + /// Warp-level GEMM operator (concept: gemm::warp::MmaTensorOp or gemm::warp::MmaSimt) + using Operator = Operator_; + + /// Padding used for A operand in shared memory + using SmemPaddingA = SmemPaddingA_; + + /// Padding used for B operand in shared memory + using SmemPaddingB = SmemPaddingB_; + + /// Number of partitions of K dimension + static int const kPartitionsK = PartitionsK; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Structure to compute the matrix product targeting CUDA cores and SIMT math +/// instructions. +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + /// Policy describing tuning details (concept: MmaPolicy) + typename Policy_, + /// Number of stages, + int Stages, + /// Used for partial specialization + typename Enable = bool> +class MmaBase { + public: + ///< Size of the Gemm problem - concept: gemm::GemmShape<> + using Shape = Shape_; + + ///< Policy describing tuning details + using Policy = Policy_; + + // + // Dependent types + // + + /// Warp-level Mma + using Operator = typename Policy::Operator; + + /// Shape describing the overall GEMM computed from shared memory + /// by each warp. 
+ using WarpGemm = typename Policy::Operator::Shape; + + /// Shape describing the number of warps filling the CTA + using WarpCount = GemmShape; + + /// Number of warp-level GEMM oeprations + static int const kWarpGemmIterations = + (WarpGemm::kK / Operator::Policy::MmaShape::kK); + + /// Number of stages + static int const kStages = Stages; + + /// Tensor reference to the A operand + using TensorRefA = TensorRef; + + /// Tensor reference to the B operand + using TensorRefB = TensorRef; + + static_assert(kWarpGemmIterations > 1, + "The pipelined structure requires at least two warp-level " + "GEMM operations."); + + static_assert((kWarpGemmIterations % 2) == 0, + "Inner loop iteration must be an even number."); + + // + // Nested structs + // + + /// Shared storage object needed by threadblock-scoped GEMM + class SharedStorage { + public: + // + // Type definitions + // + + /// Shape of the A matrix operand in shared memory + using ShapeA = MatrixShape; + + /// Shape of the B matrix operand in shared memory + using ShapeB = + MatrixShape; + + public: + // + // Data members + // + + /// Buffer for A operand + AlignedBuffer operand_A; + + /// Buffer for B operand + AlignedBuffer operand_B; + + public: + + // + // Methods + // + + /// Returns a layout object for the A matrix + CUTLASS_DEVICE + static typename Operator::LayoutA LayoutA() { + return Operator::LayoutA::packed({ShapeA::kRow, ShapeA::kColumn}); + } + + /// Returns a layout object for the B matrix + CUTLASS_HOST_DEVICE + static typename Operator::LayoutB LayoutB() { + return Operator::LayoutB::packed({ShapeB::kRow, ShapeB::kColumn}); + } + + /// Returns a TensorRef to the A operand + CUTLASS_HOST_DEVICE + TensorRefA operand_A_ref() { + return TensorRefA{operand_A.data(), LayoutA()}; + } + + /// Returns a TensorRef to the B operand + CUTLASS_HOST_DEVICE + TensorRefB operand_B_ref() { + return TensorRefB{operand_B.data(), LayoutB()}; + } + }; + + protected: + + // + // Data members + // + + /// Iterator to load a warp-scoped tile of A operand from shared memory + typename Operator::IteratorA warp_tile_iterator_A_; + + /// Iterator to load a warp-scoped tile of B operand from shared memory + typename Operator::IteratorB warp_tile_iterator_B_; + +public: + + /// Construct from tensor references + CUTLASS_DEVICE + MmaBase( + ///< Shared storage needed for internal use by threadblock-scoped GEMM + SharedStorage &shared_storage, + ///< ID within the threadblock + int thread_idx, + ///< ID of warp + int warp_idx, + ///< ID of each thread within a warp + int lane_idx + ): + warp_tile_iterator_A_(shared_storage.operand_A_ref(), lane_idx), + warp_tile_iterator_B_(shared_storage.operand_B_ref(), lane_idx) { + + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_blas3_multistage.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_blas3_multistage.h new file mode 100644 index 0000000000000000000000000000000000000000..214916d9b573b14842d6f79c649ef5762fff3c1c --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_blas3_multistage.h @@ -0,0 +1,707 @@ 
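The mma_base.h header above derives its pipeline constants from the threadblock tile shape, the warp-level MMA shape, and the stage count. The short sketch below recomputes kWarpGemmIterations and the staged shared-memory operand shapes for a hypothetical example configuration (the tile sizes are illustrative assumptions, not values taken from these headers):

// Illustrative sketch only -- not part of the vendored mma_base.h above.
// Shows, for hypothetical example tile sizes, how MmaBase-style constants
// such as kWarpGemmIterations and the staged shared-memory shapes follow
// from the threadblock shape, the warp-level MMA shape, and the stage count.
#include <cstdio>

int main() {
  // Hypothetical example configuration (not taken from the headers above).
  constexpr int kThreadblockM = 128, kThreadblockN = 128, kThreadblockK = 32;
  constexpr int kWarpK        = 32;   // WarpGemm::kK
  constexpr int kMmaShapeK    = 8;    // Operator::Policy::MmaShape::kK
  constexpr int kStages       = 3;

  // Number of warp-level GEMM operations per threadblock K tile.
  constexpr int kWarpGemmIterations = kWarpK / kMmaShapeK;   // 4

  // Shared-memory operand shapes, one K tile per stage (padding omitted).
  constexpr int kShapeA_Rows = kThreadblockM;
  constexpr int kShapeA_Cols = kThreadblockK * kStages;
  constexpr int kShapeB_Rows = kThreadblockK * kStages;
  constexpr int kShapeB_Cols = kThreadblockN;

  std::printf("warp GEMM iterations per tile: %d\n", kWarpGemmIterations);
  std::printf("smem A: %d x %d elements, smem B: %d x %d elements\n",
              kShapeA_Rows, kShapeA_Cols, kShapeB_Rows, kShapeB_Cols);
  return 0;
}

With these example numbers, kWarpGemmIterations evaluates to 4, which also satisfies the two static_asserts in MmaBase: at least two warp-level GEMM operations per tile, and an even iteration count.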
+/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Template for a double-buffered threadblock-scoped GEMM kernel. + Used by BLAS3 kernels that need to treat diagonal elements of a input iterator as a special case. + +*/ + +#pragma once + +#include "cutlass/aligned_buffer.h" +#include "cutlass/arch/memory.h" +#include "cutlass/array.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_shape.h" +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/threadblock/mma_base.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace threadblock { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Structure to compute the matrix product targeting CUDA cores and SIMT math +/// instructions. 
+template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + /// Iterates over tiles of A operand in global memory + // (concept: ReadableTileIterator | ForwardTileIterator | + // MaskedTileIterator) + typename IteratorA_, + /// Iterates over tiles of A operand in shared memory + /// (concept: WriteableTileIterator | RandomAccessTileIterator) + typename SmemIteratorA_, + /// Cache operation for operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Iterates over tiles of B operand in global memory + // (concept: ReadableTileIterator | ForwardTileIterator | + // MaskedTileIterator) + typename IteratorB_, + /// Iterates over tiles of B operand in shared memory + /// (concept: WriteableTileIterator | RandomAccessTileIterator) + typename SmemIteratorB_, + /// Cache operation for operand B + cutlass::arch::CacheOperation::Kind CacheOpB, + /// Data type of accumulator matrix + typename ElementC_, + /// Data type of accumulator matrix + typename LayoutC_, + /// Policy describing tuning details (concept: MmaPolicy) + typename Policy_, + /// Number of stages, + int Stages, + /// Use zfill or predicate for out-of-bound cp.async + SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kZfill, + /// Blas3 computation mode + BlasMode BlasMode_ = BlasMode::kTriangular, + /// Used for partial specialization + typename Enable = bool> +class MmaBlas3Multistage : + public MmaBase { +public: + ///< Base class + using Base = MmaBase; + ///< Size of the Gemm problem - concept: gemm::GemmShape<> + using Shape = Shape_; + ///< Iterates over tiles of A operand in global memory + using IteratorA = IteratorA_; + ///< Iterates over tiles of B operand in global memory + using IteratorB = IteratorB_; + ///< Data type of accumulator matrix + using ElementC = ElementC_; + ///< Layout of accumulator matrix + using LayoutC = LayoutC_; + ///< Policy describing tuning details + using Policy = Policy_; + ///< Blas Mode + static BlasMode const kBlasMode = BlasMode_; + + using SmemIteratorA = SmemIteratorA_; + using SmemIteratorB = SmemIteratorB_; + + static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; + + // + // Dependent types + // + + /// Fragment of accumulator tile + using FragmentC = typename Policy::Operator::FragmentC; + + /// Warp-level Mma + using Operator = typename Policy::Operator; + + /// Minimum architecture is Sm80 to support cp.async + using ArchTag = arch::Sm80; + + /// Complex transform on A operand + static ComplexTransform const kTransformA = Operator::kTransformA; + + /// Complex transform on B operand + static ComplexTransform const kTransformB = Operator::kTransformB; + + /// Internal structure exposed for introspection. 
+ struct Detail { + + /// Number of cp.async instructions to load one stage of operand A + static int const AsyncCopyIterationsPerStageA = + IteratorA::ThreadMap::Iterations::kCount; + + /// Number of cp.async instructions to load one stage of operand B + static int const AsyncCopyIterationsPerStageB = + IteratorB::ThreadMap::Iterations::kCount; + + /// Number of stages + static int const kStages = Stages; + + /// Number of cp.async instructions to load on group of operand A + static int const kAccessesPerGroupA = + (AsyncCopyIterationsPerStageA + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations; + + /// Number of cp.async instructions to load on group of operand B + static int const kAccessesPerGroupB = + (AsyncCopyIterationsPerStageB + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations; + }; + + private: + + using WarpLoadedFragmentA = typename Operator::FragmentA; + using WarpLoadedFragmentB = typename Operator::FragmentB; + using WarpTransformedFragmentA = typename Operator::TransformedFragmentA; + using WarpTransformedFragmentB = typename Operator::TransformedFragmentB; + + private: + + // + // Data members + // + + /// Iterator to write threadblock-scoped tile of A operand to shared memory + SmemIteratorA smem_iterator_A_; + + /// Iterator to write threadblock-scoped tile of B operand to shared memory + SmemIteratorB smem_iterator_B_; + +public: + + /// Construct from tensor references + CUTLASS_DEVICE + MmaBlas3Multistage( + ///< Shared storage needed for internal use by threadblock-scoped GEMM + typename Base::SharedStorage &shared_storage, + ///< ID within the threadblock + int thread_idx, + ///< ID of warp + int warp_idx, + ///< ID of each thread within a warp + int lane_idx + ): + Base(shared_storage, thread_idx, warp_idx, lane_idx), + smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx), + smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx) + { + // Compute warp location within threadblock tile by mapping the warp_id to + // three coordinates: + // _m: the warp's position within the threadblock along the M dimension + // _n: the warp's position within the threadblock along the N dimension + // _k: the warp's position within the threadblock along the K dimension + + int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN); + int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN); + + int warp_idx_m = warp_idx_mn % Base::WarpCount::kM; + int warp_idx_n = warp_idx_mn / Base::WarpCount::kM; + + // Add per-warp offsets in units of warp-level tiles + this->warp_tile_iterator_A_.add_tile_offset( + {warp_idx_m, Base::kWarpGemmIterations * warp_idx_k}); + this->warp_tile_iterator_B_.add_tile_offset( + {Base::kWarpGemmIterations * warp_idx_k, warp_idx_n}); + } + + CUTLASS_DEVICE + void copy_tiles_and_advance(IteratorA &iterator_A, IteratorB &iterator_B, + int group_start_A = 0, int group_start_B = 0) { + iterator_A.set_iteration_index(group_start_A * + IteratorA::kAccessesPerVector); + this->smem_iterator_A_.set_iteration_index(group_start_A); + + // Async Copy for operand A + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::kAccessesPerGroupA; ++j) { + if (group_start_A + j < Detail::AsyncCopyIterationsPerStageA) { + typename IteratorA::AccessType *dst_ptr = + reinterpret_cast( + this->smem_iterator_A_.get()); + + int const kSrcBytes = sizeof_bits::value * + IteratorA::ThreadMap::kElementsPerAccess / + IteratorA::kAccessesPerVector / 8; + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < IteratorA::kAccessesPerVector; 
++v) { + auto gmem_ptr = iterator_A.get(); + bool isvalid = iterator_A.valid(); + + if (isvalid && iterator_A.getOnDiag()) { + // Elements that are on diagonal + if (kBlasMode == BlasMode::kHermitian && cutlass::is_complex::value) { + /* Copy real part from gmem, write zero for imag part in smem */ + /* The following logic to determine kSizeRealBytes is so that compiler doesn't complain when + * compiling for not complex datatype and using half the size for cp_async_zfill */ + int const kSizeRealBytes = (platform::is_same>::value) ? 8 : 4; + cutlass::arch::cp_async_zfill( + dst_ptr + v, gmem_ptr, true); + cutlass::arch::cp_async_diag( + reinterpret_cast (dst_ptr + v) + kSizeRealBytes); + } else { + /* Write one (1) directly to smem*/ + cutlass::arch::cp_async_diag(dst_ptr + v); + } + } else { + // Elements that are not of diagonal + cutlass::arch::cp_async_zfill( + dst_ptr + v, gmem_ptr, isvalid); + } + + ++iterator_A; + } + + ++this->smem_iterator_A_; + } + } + + iterator_B.set_iteration_index(group_start_B * + IteratorB::kAccessesPerVector); + this->smem_iterator_B_.set_iteration_index(group_start_B); + + // Async Copy for operand B + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) { + if (group_start_B + j < Detail::AsyncCopyIterationsPerStageB) { + typename IteratorB::AccessType *dst_ptr = + reinterpret_cast( + this->smem_iterator_B_.get()); + + int const kSrcBytes = sizeof_bits::value * + IteratorB::ThreadMap::kElementsPerAccess / + IteratorB::kAccessesPerVector / 8; + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) { + auto gmem_ptr = iterator_B.get(); + bool isvalid = iterator_B.valid(); + + if (isvalid && iterator_B.getOnDiag()) { + // Elements that are on diagonal + if (kBlasMode == BlasMode::kHermitian && cutlass::is_complex::value) { + /* Copy real part from gmem, write zero for imag part in smem */ + int const kSizeRealBytes = (platform::is_same>::value) ? 
8 : 4; + cutlass::arch::cp_async_zfill( + dst_ptr + v, gmem_ptr, true); + cutlass::arch::cp_async_diag( + reinterpret_cast (dst_ptr + v) + kSizeRealBytes); + } else { + /* Write one (1) directly to smem*/ + cutlass::arch::cp_async_diag(dst_ptr + v); + } + } else { + // Elements that are not of diagonal + cutlass::arch::cp_async_zfill( + dst_ptr + v, gmem_ptr, isvalid); + } + + ++iterator_B; + } + ++this->smem_iterator_B_; + } + } + } + + /// Perform a threadblock-scoped matrix multiply-accumulate + CUTLASS_DEVICE + void operator()( + ///< problem size of GEMM + int gemm_k_iterations, + ///< destination accumulator tile + FragmentC &accum, + ///< iterator over A operand in global memory + IteratorA iterator_A, + ///< iterator over B operand in global memory + IteratorB iterator_B, + ///< initial value of accumulator + FragmentC const &src_accum) { + + // + // Prologue + // + + // Issue several complete stages + CUTLASS_PRAGMA_UNROLL + for (int stage = 0; stage < Base::kStages - 1; + ++stage, --gemm_k_iterations) { + + iterator_A.clear_mask(gemm_k_iterations == 0); + iterator_B.clear_mask(gemm_k_iterations == 0); + + iterator_A.set_iteration_index(0); + this->smem_iterator_A_.set_iteration_index(0); + + // Async Copy for operand A + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) { + typename IteratorA::AccessType *dst_ptr = + reinterpret_cast( + this->smem_iterator_A_.get()); + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) { + int const kSrcBytes = + sizeof_bits::value * + IteratorA::ThreadMap::kElementsPerAccess / + IteratorA::kAccessesPerVector / 8; + + auto gmem_ptr = iterator_A.get(); + bool isvalid = iterator_A.valid(); + + if (isvalid && iterator_A.getOnDiag()) { + // Elements that are on diagonal + if (kBlasMode == BlasMode::kHermitian && cutlass::is_complex::value) { + /* Copy real part from gmem, write zero for imag part in smem */ + int const kSizeRealBytes = (platform::is_same>::value) ? 8 : 4; + cutlass::arch::cp_async_zfill( + dst_ptr + v, gmem_ptr, true); + cutlass::arch::cp_async_diag( + reinterpret_cast (dst_ptr + v) + kSizeRealBytes); + } else { + /* Write one (1) directly to smem*/ + cutlass::arch::cp_async_diag(dst_ptr + v); + } + } else { + // Elements that are not of diagonal + cutlass::arch::cp_async_zfill( + dst_ptr + v, gmem_ptr, isvalid); + } + + ++iterator_A; + } + + ++this->smem_iterator_A_; + } + + iterator_B.set_iteration_index(0); + this->smem_iterator_B_.set_iteration_index(0); + + // Async Copy for operand B + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) { + typename IteratorB::AccessType *dst_ptr = + reinterpret_cast( + this->smem_iterator_B_.get()); + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) { + int const kSrcBytes = + sizeof_bits::value * + IteratorB::ThreadMap::kElementsPerAccess / + IteratorB::kAccessesPerVector / 8; + + auto gmem_ptr = iterator_B.get(); + bool isvalid = iterator_B.valid(); + + if (isvalid && iterator_B.getOnDiag()) { + // Elements that are on diagonal + if (kBlasMode == BlasMode::kHermitian && cutlass::is_complex::value) { + /* Copy real part from gmem, write zero for imag part in smem */ + int const kSizeRealBytes = (platform::is_same>::value) ? 
8 : 4; + cutlass::arch::cp_async_zfill( + dst_ptr + v, gmem_ptr, true); + cutlass::arch::cp_async_diag( + reinterpret_cast (dst_ptr + v) + kSizeRealBytes); + } else { + /* Write one (1) directly to smem*/ + cutlass::arch::cp_async_diag(dst_ptr + v); + } + } else { + // Elements that are not of diagonal + cutlass::arch::cp_async_zfill( + dst_ptr + v, gmem_ptr, isvalid); + } + + ++iterator_B; + } + + ++this->smem_iterator_B_; + } + + // Move to the next stage + iterator_A.add_tile_offset({0, 1}); + iterator_B.add_tile_offset({1, 0}); + + this->smem_iterator_A_.add_tile_offset({0, 1}); + this->smem_iterator_B_.add_tile_offset({1, 0}); + + // Defines the boundary of a stage of cp.async. + cutlass::arch::cp_async_fence(); + } + + // Perform accumulation in the 'd' output operand + accum = src_accum; + + // + // Clear the remaining tiles of SMEM. This is a functional requirement for some kernels + // so that all accumulator elements outside the GEMM footprint are zero. + // + + if (SharedMemoryClear == SharedMemoryClearOption::kClearLastStage) { + + /// Iterator to write threadblock-scoped tile of A operand to shared memory + SmemIteratorA last_smem_iterator_A(this->smem_iterator_A_); + + typename IteratorA::AccessType zero_A; + zero_A.clear(); + + last_smem_iterator_A.set_iteration_index(0); + + // Async Copy for operand A + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) { + + typename IteratorA::AccessType *dst_ptr = + reinterpret_cast( + last_smem_iterator_A.get()); + + *dst_ptr = zero_A; + + ++last_smem_iterator_A; + } + + /// Iterator to write threadblock-scoped tile of B operand to shared memory + SmemIteratorB last_smem_iterator_B(this->smem_iterator_B_); + typename IteratorB::AccessType zero_B; + + zero_B.clear(); + last_smem_iterator_B.set_iteration_index(0); + + // Async Copy for operand B + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) { + + typename IteratorB::AccessType *dst_ptr = + reinterpret_cast( + last_smem_iterator_B.get()); + + *dst_ptr = zero_B; + + ++last_smem_iterator_B; + } + } + + // Waits until kStages-2 stages have committed. + cutlass::arch::cp_async_wait(); + __syncthreads(); + + // Pair of fragments used to overlap shared memory loads and math + // instructions + WarpLoadedFragmentA warp_loaded_frag_A[2]; + WarpLoadedFragmentB warp_loaded_frag_B[2]; + WarpTransformedFragmentA warp_transformed_frag_A[2]; + WarpTransformedFragmentB warp_transformed_frag_B[2]; + + Operator warp_mma; + + this->warp_tile_iterator_A_.set_kgroup_index(0); + this->warp_tile_iterator_B_.set_kgroup_index(0); + + this->warp_tile_iterator_A_.load(warp_loaded_frag_A[0]); + this->warp_tile_iterator_B_.load(warp_loaded_frag_B[0]); + + ++this->warp_tile_iterator_A_; + ++this->warp_tile_iterator_B_; + + iterator_A.clear_mask(gemm_k_iterations == 0); + iterator_B.clear_mask(gemm_k_iterations == 0); + + int smem_write_stage_idx = Base::kStages - 1; + int smem_read_stage_idx = 0; + + warp_mma.transform(warp_transformed_frag_A[0], warp_transformed_frag_B[0], + warp_loaded_frag_A[0], warp_loaded_frag_B[0]); + + // tf32x3 kernels use staging accumulation. warp_mma uses a temporary + // accumulator and this temporary accumulator is added to the final + // accumulator once in every mainloop iteration. 
+ plus plus_accum; + + FragmentC tmp_accum; + + if (platform::is_same::value + || platform::is_same::value) { + + tmp_accum.clear(); + } + + // + // Mainloop + // + + CUTLASS_GEMM_LOOP + for (; gemm_k_iterations > (-Base::kStages + 1);) { + // + // Loop over GEMM K dimension + // + + // Computes a warp-level GEMM on data held in shared memory + // Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate + CUTLASS_PRAGMA_UNROLL + for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; + ++warp_mma_k) { + + // Load warp-level tiles from shared memory, wrapping to k offset if + // this is the last group as the case may be. + + this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); + this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); + + this->warp_tile_iterator_A_.load(warp_loaded_frag_A[(warp_mma_k + 1) % 2]); + this->warp_tile_iterator_B_.load(warp_loaded_frag_B[(warp_mma_k + 1) % 2]); + + ++this->warp_tile_iterator_A_; + ++this->warp_tile_iterator_B_; + + if (warp_mma_k > 0) + warp_mma.transform(warp_transformed_frag_A[warp_mma_k % 2], + warp_transformed_frag_B[warp_mma_k % 2], + warp_loaded_frag_A[warp_mma_k % 2], + warp_loaded_frag_B[warp_mma_k % 2]); + + if (platform::is_same::value + || platform::is_same::value) { + + warp_mma( + tmp_accum, + warp_transformed_frag_A[warp_mma_k % 2], + warp_transformed_frag_B[warp_mma_k % 2], + tmp_accum + ); + + if (warp_mma_k == 0) { + accum = plus_accum(accum, tmp_accum); + tmp_accum.clear(); + } + } else { + warp_mma( + accum, + warp_transformed_frag_A[warp_mma_k % 2], + warp_transformed_frag_B[warp_mma_k % 2], + accum + ); + } + + // Issue global->shared copies for the this stage + if (warp_mma_k < Base::kWarpGemmIterations - 1) { + int group_start_iteration_A, group_start_iteration_B; + + group_start_iteration_A = warp_mma_k * Detail::kAccessesPerGroupA; + group_start_iteration_B = warp_mma_k * Detail::kAccessesPerGroupB; + + copy_tiles_and_advance(iterator_A, iterator_B, group_start_iteration_A, + group_start_iteration_B); + } + + if (warp_mma_k + 2 == Base::kWarpGemmIterations) { + int group_start_iteration_A, group_start_iteration_B; + group_start_iteration_A = + (warp_mma_k + 1) * Detail::kAccessesPerGroupA; + group_start_iteration_B = + (warp_mma_k + 1) * Detail::kAccessesPerGroupB; + + copy_tiles_and_advance(iterator_A, iterator_B, group_start_iteration_A, + group_start_iteration_B); + + // Inserts a memory fence between stages of cp.async instructions. + cutlass::arch::cp_async_fence(); + + // Waits until kStages-2 stages have committed. 
+ arch::cp_async_wait(); + __syncthreads(); + + // Move to the next stage + iterator_A.add_tile_offset({0, 1}); + iterator_B.add_tile_offset({1, 0}); + + this->smem_iterator_A_.add_tile_offset({0, 1}); + this->smem_iterator_B_.add_tile_offset({1, 0}); + + // Add negative offsets to return iterators to the 'start' of the + // circular buffer in shared memory + if (smem_write_stage_idx == (Base::kStages - 1)) { + this->smem_iterator_A_.add_tile_offset({0, -Base::kStages}); + this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0}); + smem_write_stage_idx = 0; + } else { + ++smem_write_stage_idx; + } + + if (smem_read_stage_idx == (Base::kStages - 1)) { + this->warp_tile_iterator_A_.add_tile_offset( + {0, -Base::kStages * Policy::kPartitionsK * + Base::kWarpGemmIterations}); + this->warp_tile_iterator_B_.add_tile_offset( + {-Base::kStages * Policy::kPartitionsK * + Base::kWarpGemmIterations, + 0}); + smem_read_stage_idx = 0; + } else { + ++smem_read_stage_idx; + } + + --gemm_k_iterations; + iterator_A.clear_mask(gemm_k_iterations == 0); + iterator_B.clear_mask(gemm_k_iterations == 0); + } + + // Do any conversions feeding the first stage at the end of the loop so + // we can start right away on mma instructions + if (warp_mma_k + 1 == Base::kWarpGemmIterations) + warp_mma.transform(warp_transformed_frag_A[(warp_mma_k + 1) % 2], + warp_transformed_frag_B[(warp_mma_k + 1) % 2], + warp_loaded_frag_A[(warp_mma_k + 1) % 2], + warp_loaded_frag_B[(warp_mma_k + 1) % 2]); + } + + } + + if (platform::is_same::value + || platform::is_same::value) { + accum = plus_accum(accum, tmp_accum); + } + + if (SharedMemoryClear == SharedMemoryClearOption::kZfill) { + // commit and drain all pending and predicated cp.async pnz from the GEMM mainloop + cutlass::arch::cp_async_fence(); + cutlass::arch::cp_async_wait<0>(); + __syncthreads(); + } + + // Commit and drain all pending and predicated cp.async pnz from the GEMM mainloop + cutlass::arch::cp_async_fence(); + cutlass::arch::cp_async_wait<0>(); + __syncthreads(); + + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_layernorm_mainloop_fusion_multistage.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_layernorm_mainloop_fusion_multistage.h new file mode 100644 index 0000000000000000000000000000000000000000..197d0872676cb49b56631b514d8b7c3a147472b7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_layernorm_mainloop_fusion_multistage.h @@ -0,0 +1,863 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Template for a double-buffered threadblock-scoped GEMM kernel. + + It loads two loop invariant vectors, mean and var, in the prologue and + stores them in the register file. In the mainloop, it loads two loop + variant vectors, gamma and beta, by using cp.async. We will call + elementwise operation to apply var, mean, gamma, beta between ldmatrix and + warp mma. +*/ + +#pragma once + +#include "cutlass/aligned_buffer.h" +#include "cutlass/arch/memory.h" +#include "cutlass/array.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_shape.h" +#include "cutlass/numeric_types.h" +#include "cutlass/transform/threadblock/predicated_scale_bias_vector_iterator.h" +#include "cutlass/gemm/threadblock/mma_base.h" +#include "cutlass/gemm/warp/layernorm_scale_bias_transform.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace threadblock { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Structure to compute the matrix product targeting CUDA cores and SIMT math +/// instructions. 
+template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + /// Element type of scale and bias vectors + typename ElementScaleBias_, + /// Layout of scale and bias vectors + typename LayoutScaleBias_, + /// Policy describing tuning details (concept: MmaPolicy) + typename Policy_, + /// WarpIterator to load Scale or Bias vector from the shared memory + typename WarpIteratorGammaBeta_, + /// Number of stages, + int Stages, + /// Used for partial specialization + typename Enable = bool> +class MmaMainloopFusionBase { + public: + ///< Size of the Gemm problem - concept: gemm::GemmShape<> + using Shape = Shape_; + + ///< Element type of scale and bias vectors + using ElementScaleBias = ElementScaleBias_; + + /// Layout of scale and bias vectors + using LayoutScaleBias = LayoutScaleBias_; + + ///< Policy describing tuning details + using Policy = Policy_; + + ///< WarpIterator to load Scale or Bias vector from the shared memory + using WarpIteratorGammaBeta = WarpIteratorGammaBeta_; + + // + // Dependent types + // + + /// Warp-level Mma + using Operator = typename Policy::Operator; + + /// Shape describing the overall GEMM computed from shared memory + /// by each warp. + using WarpGemm = typename Policy::Operator::Shape; + + /// Shape describing the number of warps filling the CTA + using WarpCount = cutlass::gemm::GemmShape; + + /// Number of warp-level GEMM oeprations + static int const kWarpGemmIterations = + (WarpGemm::kK / Operator::Policy::MmaShape::kK); + + /// Number of stages + static int const kStages = Stages; + + /// Tensor reference to the A operand + using TensorRefA = TensorRef; + + /// Tensor reference to the scale and bias vectors + using TensorRefGammaBeta = TensorRef; + + /// Tensor reference to the B operand + using TensorRefB = TensorRef; + + // + // Nested structs + // + + /// Shared storage object needed by threadblock-scoped GEMM + class SharedStorage { + public: + // + // Type definitions + // + + /// Shape of the A matrix operand in shared memory + using ShapeA = MatrixShape; + + /// Shape of the A scale and bias vectors in shared memory + using ShapeGammaBeta = + MatrixShape<1 + Policy::SmemPaddingA::kRow, + 2 * Shape::kK * kStages + Policy::SmemPaddingA::kColumn>; + + /// Shape of the B matrix operand in shared memory + using ShapeB = + MatrixShape; + + public: + // + // Data members + // + + /// Buffer for A operand + AlignedBuffer operand_A; + + /// Buffer for B operand + AlignedBuffer operand_B; + + /// Buffer for A operand Scale and Bias + AlignedBuffer operand_A_gamma_beta; + + public: + + // + // Methods + // + + /// Returns a layout object for the A matrix + CUTLASS_DEVICE + static typename Operator::LayoutA LayoutA() { + return Operator::LayoutA::packed({ShapeA::kRow, ShapeA::kColumn}); + } + + /// Returns a layout object for the B matrix + CUTLASS_HOST_DEVICE + static typename Operator::LayoutB LayoutB() { + return Operator::LayoutB::packed({ShapeB::kRow, ShapeB::kColumn}); + } + + /// Returns a layout object for the A scale and bias vectors + CUTLASS_DEVICE + static LayoutScaleBias LayoutScaleBias() { + return LayoutScaleBias::packed( + {ShapeGammaBeta::kRow, ShapeGammaBeta::kColumn}); + } + + /// Returns a TensorRef to the A operand + CUTLASS_HOST_DEVICE + TensorRefA operand_A_ref() { + return TensorRefA{operand_A.data(), LayoutA()}; + } + + /// Returns a TensorRef to the B operand + CUTLASS_HOST_DEVICE + TensorRefB operand_B_ref() { + return TensorRefB{operand_B.data(), LayoutB()}; + } + + /// Returns a TensorRef to the A 
operand Scale vector + CUTLASS_HOST_DEVICE + TensorRefGammaBeta operand_A_gamma_beta_ref() { + return TensorRefGammaBeta{operand_A_gamma_beta.data(), LayoutScaleBias()}; + } + }; + + protected: + + // + // Data members + // + + /// Iterator to load a warp-scoped tile of A operand from shared memory + typename Operator::IteratorA warp_tile_iterator_A_; + + /// Iterator to load a warp-scoped tile of A operand scale and bias vector + /// from shared memory + WarpIteratorGammaBeta warp_tile_iterator_A_gamma_beta_; + + /// Iterator to load a warp-scoped tile of B operand from shared memory + typename Operator::IteratorB warp_tile_iterator_B_; + +public: + + /// Construct from tensor references + CUTLASS_DEVICE + MmaMainloopFusionBase( + ///< Shared storage needed for internal use by threadblock-scoped GEMM + SharedStorage &shared_storage, + ///< ID within the threadblock + int thread_idx, + ///< ID of warp + int warp_idx, + ///< ID of each thread within a warp + int lane_idx) + : warp_tile_iterator_A_(shared_storage.operand_A_ref(), lane_idx), + warp_tile_iterator_A_gamma_beta_( + shared_storage.operand_A_gamma_beta_ref(), lane_idx), + warp_tile_iterator_B_(shared_storage.operand_B_ref(), lane_idx) {} +}; + + +/// Structure to compute the matrix product targeting CUDA cores and SIMT math +/// instructions. +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + /// Iterates over tiles of A operand in global memory + // (concept: ReadableTileIterator | ForwardTileIterator | + // MaskedTileIterator) + typename IteratorA_, + /// Iterates over tiles of A operand in shared memory + /// (concept: WriteableTileIterator | RandomAccessTileIterator) + typename SmemIteratorA_, + /// Cache operation for operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Iterates over tiles of B operand in global memory + // (concept: ReadableTileIterator | ForwardTileIterator | + // MaskedTileIterator) + typename IteratorB_, + /// Iterates over tiles of B operand in shared memory + /// (concept: WriteableTileIterator | RandomAccessTileIterator) + typename SmemIteratorB_, + /// Cache operation for operand B + cutlass::arch::CacheOperation::Kind CacheOpB, + /// Iterates over vectors of var and mean vector in global memory + // (concept: ReadableTileIterator | ForwardTileIterator | + // MaskedTileIterator) + typename IteratorVarMean_, + /// Iterates over vectors of scale and bias vector in global memory + // (concept: ReadableTileIterator | ForwardTileIterator | + // MaskedTileIterator) + typename IteratorGammaBeta_, + /// Iterates over vectors of scale and bias vector in shared memory + /// (concept: WriteableTileIterator | RandomAccessTileIterator) + typename SmemIteratorGammaBeta_, + /// Cache operation for scale/bias operand + cutlass::arch::CacheOperation::Kind CacheOpGammaBeta, + /// Data type of accumulator matrix + typename ElementC_, + /// Data type of accumulator matrix + typename LayoutC_, + /// Policy describing tuning details (concept: MmaPolicy) + typename Policy_, + /// WarpIterator to load Scale or Bias vector from the shared memory + typename WarpIteratorGammaBeta_, + /// Number of stages, + int Stages, + /// Use zfill or predicate for out-of-bound cp.async + SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone, + /// Used for partial specialization + typename Enable = bool> +class MmaLayernormMainloopFusionMultistage : + public MmaMainloopFusionBase { +public: + ///< Size of the Gemm problem - concept: gemm::GemmShape<> + using Shape = 
Shape_; + ///< Iterates over tiles of A operand in global memory + using IteratorA = IteratorA_; + ///< Iterates over tiles of B operand in global memory + using IteratorB = IteratorB_; + ///< Iterates over tiles of the var and mean vectors in global memory + using IteratorVarMean = IteratorVarMean_; + ///< Iterates over tiles of the scale and bias vectors in global memory + using IteratorGammaBeta = IteratorGammaBeta_; + ///< WarpIterator to load Scale or Bias vector from the shared memory + using WarpIteratorGammaBeta = WarpIteratorGammaBeta_; + ///< Policy describing tuning details + using Policy = Policy_; + + ///< Base class + using Base = MmaMainloopFusionBase; + + ///< Data type of accumulator matrix + using ElementC = ElementC_; + ///< Layout of accumulator matrix + using LayoutC = LayoutC_; + + using SmemIteratorA = SmemIteratorA_; + using SmemIteratorB = SmemIteratorB_; + using SmemIteratorGammaBeta = SmemIteratorGammaBeta_; + + static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; + static cutlass::arch::CacheOperation::Kind const kCacheOpGammaBeta = + CacheOpGammaBeta; + + // + // Dependent types + // + + /// Fragment of accumulator tile + using FragmentC = typename Policy::Operator::FragmentC; + + /// Warp-level Mma + using Operator = typename Policy::Operator; + + /// Minimum architecture is Sm80 to support cp.async + using ArchTag = arch::Sm80; + + /// Complex transform on A operand + static ComplexTransform const kTransformA = Operator::kTransformA; + + /// Complex transform on B operand + static ComplexTransform const kTransformB = Operator::kTransformB; + + /// Internal structure exposed for introspection. + struct Detail { + + static_assert(Base::kWarpGemmIterations > 1, + "The pipelined structure requires at least two warp-level " + "GEMM operations."); + + /// Number of cp.async instructions to load one stage of operand A + static int const AsyncCopyIterationsPerStageA = + IteratorA::ThreadMap::Iterations::kCount; + + /// Number of cp.async instructions to load one stage of operand B + static int const AsyncCopyIterationsPerStageB = + IteratorB::ThreadMap::Iterations::kCount; + + /// Number of stages + static int const kStages = Stages; + + /// Number of cp.async instructions to load on group of operand A + static int const kAccessesPerGroupA = + (AsyncCopyIterationsPerStageA + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations; + + /// Number of cp.async instructions to load on group of operand B + static int const kAccessesPerGroupB = + (AsyncCopyIterationsPerStageB + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations; + }; + + private: + + using WarpLoadedFragmentA = typename Operator::FragmentA; + using WarpLoadedFragmentB = typename Operator::FragmentB; + using WarpTransformedFragmentA = typename Operator::TransformedFragmentA; + using WarpTransformedFragmentB = typename Operator::TransformedFragmentB; + + using WarpLoadedFragmentVarMean = typename IteratorVarMean::Fragment; + using WarpLoadedFragmentGammaBeta = + typename WarpIteratorGammaBeta::Fragment; + + + private: + + // + // Data members + // + + /// Iterator to write threadblock-scoped tile of A operand to shared memory + SmemIteratorA smem_iterator_A_; + + /// Iterator to write threadblock-scoped tile of A operand scale vector to shared memory + SmemIteratorGammaBeta smem_iterator_A_gamma_beta_; + + /// Iterator to write threadblock-scoped tile of B operand to shared memory + SmemIteratorB 
smem_iterator_B_; + + int warp_idx_m_; + + int warp_idx_n_; + +public: + + /// Construct from tensor references + CUTLASS_DEVICE + MmaLayernormMainloopFusionMultistage( + ///< Shared storage needed for internal use by threadblock-scoped GEMM + typename Base::SharedStorage &shared_storage, + ///< ID within the threadblock + int thread_idx, + ///< ID of warp + int warp_idx, + ///< ID of each thread within a warp + int lane_idx + ): + Base(shared_storage, thread_idx, warp_idx, lane_idx), + smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx), + smem_iterator_A_gamma_beta_(shared_storage.operand_A_gamma_beta_ref(), + thread_idx), + smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx) + { + // Compute warp location within threadblock tile by mapping the warp_id to + // three coordinates: + // _m: the warp's position within the threadblock along the M dimension + // _n: the warp's position within the threadblock along the N dimension + // _k: the warp's position within the threadblock along the K dimension + + int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN); + int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN); + + warp_idx_m_ = warp_idx_mn % Base::WarpCount::kM; + warp_idx_n_ = warp_idx_mn / Base::WarpCount::kM; + + // Add per-warp offsets in units of warp-level tiles + this->warp_tile_iterator_A_.add_tile_offset( + {warp_idx_m_, Base::kWarpGemmIterations * warp_idx_k}); + this->warp_tile_iterator_A_gamma_beta_.add_tile_offset( + {warp_idx_m_, Base::kWarpGemmIterations * warp_idx_k}); + this->warp_tile_iterator_B_.add_tile_offset( + {Base::kWarpGemmIterations * warp_idx_k, warp_idx_n_}); + } + + CUTLASS_DEVICE + void copy_tiles_and_advance(IteratorA &iterator_A, + IteratorGammaBeta &iterator_A_gamma_beta, + IteratorB &iterator_B, + int group_start_A = 0, int group_start_B = 0) { + iterator_A.set_iteration_index(group_start_A * + IteratorA::kAccessesPerVector); + this->smem_iterator_A_.set_iteration_index(group_start_A); + + // Async Copy for operand A + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::kAccessesPerGroupA; ++j) { + if (group_start_A + j < Detail::AsyncCopyIterationsPerStageA) { + typename IteratorA::AccessType *dst_ptr = + reinterpret_cast( + this->smem_iterator_A_.get()); + + int const kSrcBytes = sizeof_bits::value * + IteratorA::ThreadMap::kElementsPerAccess / + IteratorA::kAccessesPerVector / 8; + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) { + auto gmem_ptr = iterator_A.get(); + + if (SharedMemoryClear == SharedMemoryClearOption::kZfill) { + cutlass::arch::cp_async_zfill( + dst_ptr + v, gmem_ptr, iterator_A.valid()); + } else { + cutlass::arch::cp_async( + dst_ptr + v, gmem_ptr, iterator_A.valid()); + } + + ++iterator_A; + } + + ++this->smem_iterator_A_; + } + } + + // Async Copy for operand A scale and bias vector. Scale and bias vectors + // are small. One iteration is enough. 
+ if (group_start_A == 0) { + typename IteratorGammaBeta::AccessType *dst_ptr = + reinterpret_cast( + this->smem_iterator_A_gamma_beta_.get()); + + int const kSrcBytes = + sizeof_bits::value * + IteratorGammaBeta::kElementsPerAccess / 8; + + cutlass::arch::cp_async( + dst_ptr, iterator_A_gamma_beta.get(), iterator_A_gamma_beta.valid()); + } + + iterator_B.set_iteration_index(group_start_B * + IteratorB::kAccessesPerVector); + this->smem_iterator_B_.set_iteration_index(group_start_B); + + // Async Copy for operand B + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) { + if (group_start_B + j < Detail::AsyncCopyIterationsPerStageB) { + typename IteratorB::AccessType *dst_ptr = + reinterpret_cast( + this->smem_iterator_B_.get()); + + int const kSrcBytes = sizeof_bits::value * + IteratorB::ThreadMap::kElementsPerAccess / + IteratorB::kAccessesPerVector / 8; + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) { + auto gmem_ptr = iterator_B.get(); + + if (SharedMemoryClear == SharedMemoryClearOption::kZfill) { + cutlass::arch::cp_async_zfill( + dst_ptr + v, gmem_ptr, iterator_B.valid()); + } else { + cutlass::arch::cp_async( + dst_ptr + v, gmem_ptr, iterator_B.valid()); + } + + ++iterator_B; + } + ++this->smem_iterator_B_; + } + } + } + + /// Perform a threadblock-scoped matrix multiply-accumulate + CUTLASS_DEVICE + void operator()( + ///< problem size of GEMM + int gemm_k_iterations, + ///< destination accumulator tile + FragmentC &accum, + ///< iterator over A operand in global memory + IteratorA iterator_A, + ///< iterator over B operand in global memory + IteratorB iterator_B, + ///< iterator over B operand in global memory + IteratorVarMean iterator_var_mean, + ///< iterator over scale and bias vectors in global memory + IteratorGammaBeta iterator_A_gamma_beta, + ///< initial value of accumulator + FragmentC const &src_accum) { + + // + // Prologue + // + // Issue several complete stages + + WarpLoadedFragmentVarMean warp_loaded_frag_var_mean; + iterator_var_mean.add_tile_offset({0, warp_idx_m_}); + iterator_var_mean.load(warp_loaded_frag_var_mean); + + CUTLASS_PRAGMA_UNROLL + for (int stage = 0; stage < Base::kStages - 1; + ++stage, --gemm_k_iterations) { + + iterator_A.clear_mask(gemm_k_iterations == 0); + iterator_A_gamma_beta.clear_mask(gemm_k_iterations == 0); + iterator_B.clear_mask(gemm_k_iterations == 0); + + iterator_A.set_iteration_index(0); + this->smem_iterator_A_.set_iteration_index(0); + + // Async Copy for operand A + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) { + typename IteratorA::AccessType *dst_ptr = + reinterpret_cast( + this->smem_iterator_A_.get()); + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) { + int const kSrcBytes = + sizeof_bits::value * + IteratorA::ThreadMap::kElementsPerAccess / + IteratorA::kAccessesPerVector / 8; + + int src_bytes = (iterator_A.valid() ? kSrcBytes : 0); + + cutlass::arch::cp_async_zfill( + dst_ptr + v, iterator_A.get(), iterator_A.valid()); + + ++iterator_A; + } + + ++this->smem_iterator_A_; + } + + // Async Copy for operand A scale and bias vectors. Scale and bias + // vectors are small. One iteration is enough. 
+ { + typename IteratorGammaBeta::AccessType *dst_ptr = + reinterpret_cast( + this->smem_iterator_A_gamma_beta_.get()); + + int const kSrcBytes = + sizeof_bits::value * + IteratorGammaBeta::kElementsPerAccess / 8; + + cutlass::arch::cp_async( + dst_ptr, iterator_A_gamma_beta.get(), iterator_A_gamma_beta.valid()); + } + + iterator_B.set_iteration_index(0); + this->smem_iterator_B_.set_iteration_index(0); + + // Async Copy for operand B + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) { + typename IteratorB::AccessType *dst_ptr = + reinterpret_cast( + this->smem_iterator_B_.get()); + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) { + int const kSrcBytes = + sizeof_bits::value * + IteratorB::ThreadMap::kElementsPerAccess / + IteratorB::kAccessesPerVector / 8; + + cutlass::arch::cp_async_zfill( + dst_ptr + v, iterator_B.get(), iterator_B.valid()); + + ++iterator_B; + } + + ++this->smem_iterator_B_; + } + + // Move to the next stage + iterator_A.add_tile_offset({0, 1}); + iterator_A_gamma_beta.add_tile_offset({0, 1}); + iterator_B.add_tile_offset({1, 0}); + + this->smem_iterator_A_.add_tile_offset({0, 1}); + this->smem_iterator_A_gamma_beta_.add_tile_offset({0, 1}); + this->smem_iterator_B_.add_tile_offset({1, 0}); + + // Defines the boundary of a stage of cp.async. + cutlass::arch::cp_async_fence(); + } + + // Perform accumulation in the 'd' output operand + accum = src_accum; + + // Waits until kStages-2 stages have committed. + cutlass::arch::cp_async_wait(); + __syncthreads(); + + // Pair of fragments used to overlap shared memory loads and math + // instructions + WarpLoadedFragmentA warp_loaded_frag_A[2]; + WarpLoadedFragmentB warp_loaded_frag_B[2]; + WarpLoadedFragmentGammaBeta warp_loaded_frag_A_gamma_beta[2]; + WarpTransformedFragmentA warp_transformed_frag_A[2]; + WarpTransformedFragmentB warp_transformed_frag_B[2]; + + Operator warp_mma; + cutlass::gemm::warp::LayernormScaleBiasTransform + elementwise_transform; + + this->warp_tile_iterator_A_.set_kgroup_index(0); + this->warp_tile_iterator_A_gamma_beta_.set_kgroup_index(0); + this->warp_tile_iterator_B_.set_kgroup_index(0); + + this->warp_tile_iterator_A_.load(warp_loaded_frag_A[0]); + this->warp_tile_iterator_A_gamma_beta_.load( + warp_loaded_frag_A_gamma_beta[0]); + this->warp_tile_iterator_B_.load(warp_loaded_frag_B[0]); + + ++this->warp_tile_iterator_A_; + ++this->warp_tile_iterator_A_gamma_beta_; + ++this->warp_tile_iterator_B_; + + iterator_A.clear_mask(gemm_k_iterations == 0); + iterator_A_gamma_beta.clear_mask(gemm_k_iterations == 0); + iterator_B.clear_mask(gemm_k_iterations == 0); + + int smem_write_stage_idx = Base::kStages - 1; + int smem_read_stage_idx = 0; + + warp_mma.transform(warp_transformed_frag_A[0], warp_transformed_frag_B[0], + warp_loaded_frag_A[0], warp_loaded_frag_B[0]); + + elementwise_transform(warp_transformed_frag_A[0], + warp_loaded_frag_var_mean, + warp_loaded_frag_A_gamma_beta[0]); + + // + // Mainloop + // + + CUTLASS_GEMM_LOOP + for (; gemm_k_iterations > (-Base::kStages + 1);) { + // + // Loop over GEMM K dimension + // + + // Computes a warp-level GEMM on data held in shared memory + // Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate + CUTLASS_PRAGMA_UNROLL + for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; + ++warp_mma_k) { + + // Load warp-level tiles from shared memory, wrapping to k offset if + // this is the last group as the case may be. 
+
+        this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
+        this->warp_tile_iterator_A_gamma_beta_.set_kgroup_index(
+            (warp_mma_k + 1) % Base::kWarpGemmIterations);
+        this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
+
+        this->warp_tile_iterator_A_.load(warp_loaded_frag_A[(warp_mma_k + 1) % 2]);
+        this->warp_tile_iterator_A_gamma_beta_.load(
+            warp_loaded_frag_A_gamma_beta[(warp_mma_k + 1) % 2]);
+        this->warp_tile_iterator_B_.load(warp_loaded_frag_B[(warp_mma_k + 1) % 2]);
+
+        ++this->warp_tile_iterator_A_;
+        ++this->warp_tile_iterator_A_gamma_beta_;
+        ++this->warp_tile_iterator_B_;
+
+        if (warp_mma_k > 0) {
+          warp_mma.transform(warp_transformed_frag_A[warp_mma_k % 2],
+                             warp_transformed_frag_B[warp_mma_k % 2],
+                             warp_loaded_frag_A[warp_mma_k % 2],
+                             warp_loaded_frag_B[warp_mma_k % 2]);
+
+          elementwise_transform(warp_transformed_frag_A[warp_mma_k % 2],
+                                warp_loaded_frag_var_mean,
+                                warp_loaded_frag_A_gamma_beta[warp_mma_k % 2]);
+        }
+
+        warp_mma(
+          accum,
+          warp_transformed_frag_A[warp_mma_k % 2],
+          warp_transformed_frag_B[warp_mma_k % 2],
+          accum
+        );
+
+        // Issue global->shared copies for this stage
+        if (warp_mma_k < Base::kWarpGemmIterations - 1) {
+          int group_start_iteration_A, group_start_iteration_B;
+
+          group_start_iteration_A = warp_mma_k * Detail::kAccessesPerGroupA;
+          group_start_iteration_B = warp_mma_k * Detail::kAccessesPerGroupB;
+
+          copy_tiles_and_advance(iterator_A, iterator_A_gamma_beta, iterator_B,
+                                 group_start_iteration_A,
+                                 group_start_iteration_B);
+        }
+
+        if (warp_mma_k + 2 == Base::kWarpGemmIterations) {
+          int group_start_iteration_A, group_start_iteration_B;
+          group_start_iteration_A =
+              (warp_mma_k + 1) * Detail::kAccessesPerGroupA;
+          group_start_iteration_B =
+              (warp_mma_k + 1) * Detail::kAccessesPerGroupB;
+
+          copy_tiles_and_advance(iterator_A, iterator_A_gamma_beta, iterator_B,
+                                 group_start_iteration_A,
+                                 group_start_iteration_B);
+
+          // Inserts a memory fence between stages of cp.async instructions.
+          cutlass::arch::cp_async_fence();
+
+          // Waits until kStages-2 stages have committed.
+ arch::cp_async_wait(); + __syncthreads(); + + // Move to the next stage + iterator_A.add_tile_offset({0, 1}); + iterator_A_gamma_beta.add_tile_offset({0, 1}); + iterator_B.add_tile_offset({1, 0}); + + this->smem_iterator_A_.add_tile_offset({0, 1}); + this->smem_iterator_A_gamma_beta_.add_tile_offset({0, 1}); + this->smem_iterator_B_.add_tile_offset({1, 0}); + + // Add negative offsets to return iterators to the 'start' of the + // circular buffer in shared memory + if (smem_write_stage_idx == (Base::kStages - 1)) { + this->smem_iterator_A_.add_tile_offset({0, -Base::kStages}); + this->smem_iterator_A_gamma_beta_.add_tile_offset({0, -Base::kStages}); + this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0}); + smem_write_stage_idx = 0; + } else { + ++smem_write_stage_idx; + } + + if (smem_read_stage_idx == (Base::kStages - 1)) { + this->warp_tile_iterator_A_.add_tile_offset( + {0, -Base::kStages * Policy::kPartitionsK * + Base::kWarpGemmIterations}); + this->warp_tile_iterator_A_gamma_beta_.add_tile_offset( + {0, -Base::kStages * Policy::kPartitionsK * + Base::kWarpGemmIterations}); + this->warp_tile_iterator_B_.add_tile_offset( + {-Base::kStages * Policy::kPartitionsK * + Base::kWarpGemmIterations, + 0}); + smem_read_stage_idx = 0; + } else { + ++smem_read_stage_idx; + } + + --gemm_k_iterations; + iterator_A.clear_mask(gemm_k_iterations == 0); + iterator_A_gamma_beta.clear_mask(gemm_k_iterations == 0); + iterator_B.clear_mask(gemm_k_iterations == 0); + } + + // Do any conversions feeding the first stage at the end of the loop so + // we can start right away on mma instructions + if (warp_mma_k + 1 == Base::kWarpGemmIterations) { + warp_mma.transform(warp_transformed_frag_A[(warp_mma_k + 1) % 2], + warp_transformed_frag_B[(warp_mma_k + 1) % 2], + warp_loaded_frag_A[(warp_mma_k + 1) % 2], + warp_loaded_frag_B[(warp_mma_k + 1) % 2]); + + elementwise_transform( + warp_transformed_frag_A[(warp_mma_k + 1) % 2], + warp_loaded_frag_var_mean, + warp_loaded_frag_A_gamma_beta[(warp_mma_k + 1) % 2]); + } + } + + } + + // commit and drain all pending and predicated cp.async pnz from the GEMM mainloop + cutlass::arch::cp_async_fence(); + cutlass::arch::cp_async_wait<0>(); + __syncthreads(); + + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_multistage.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_multistage.h new file mode 100644 index 0000000000000000000000000000000000000000..1cc72e2e09d2747bdd823547c4de3756cc598002 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_multistage.h @@ -0,0 +1,741 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Template for a double-buffered threadblock-scoped GEMM kernel. +*/ + +#pragma once + + +#include "cutlass/aligned_buffer.h" +#include "cutlass/arch/memory.h" +#include "cutlass/array.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_shape.h" +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/threadblock/mma_base.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace threadblock { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Structure to compute the matrix product targeting CUDA cores and SIMT math +/// instructions. 
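+///
+/// MmaMultistage keeps kStages tiles of each operand resident in shared memory.
+/// The prologue issues cp.async copies for the first kStages-1 stages; the
+/// mainloop then overlaps warp-level MMA work on one stage with asynchronous
+/// global->shared copies for a later stage, advancing circular read and write
+/// stage indices, before draining all outstanding cp.async at the end.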
+template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + /// Iterates over tiles of A operand in global memory + // (concept: ReadableTileIterator | ForwardTileIterator | + // MaskedTileIterator) + typename IteratorA_, + /// Iterates over tiles of A operand in shared memory + /// (concept: WriteableTileIterator | RandomAccessTileIterator) + typename SmemIteratorA_, + /// Cache operation for operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Iterates over tiles of B operand in global memory + // (concept: ReadableTileIterator | ForwardTileIterator | + // MaskedTileIterator) + typename IteratorB_, + /// Iterates over tiles of B operand in shared memory + /// (concept: WriteableTileIterator | RandomAccessTileIterator) + typename SmemIteratorB_, + /// Cache operation for operand B + cutlass::arch::CacheOperation::Kind CacheOpB, + /// Data type of accumulator matrix + typename ElementC_, + /// Data type of accumulator matrix + typename LayoutC_, + /// Policy describing tuning details (concept: MmaPolicy) + typename Policy_, + /// Number of stages, + int Stages, + /// Use zfill or predicate for out-of-bound cp.async + SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone, + /// Used for partial specialization + typename Enable = bool> +class MmaMultistage : + public MmaBase { +public: + ///< Base class + using Base = MmaBase; + ///< Size of the Gemm problem - concept: gemm::GemmShape<> + using Shape = Shape_; + ///< Iterates over tiles of A operand in global memory + using IteratorA = IteratorA_; + ///< Iterates over tiles of B operand in global memory + using IteratorB = IteratorB_; + ///< Data type of accumulator matrix + using ElementC = ElementC_; + ///< Layout of accumulator matrix + using LayoutC = LayoutC_; + ///< Policy describing tuning details + using Policy = Policy_; + + using SmemIteratorA = SmemIteratorA_; + using SmemIteratorB = SmemIteratorB_; + + static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; + + // + // Dependent types + // + + /// Fragment of accumulator tile + using FragmentC = typename Policy::Operator::FragmentC; + + /// Warp-level Mma + using Operator = typename Policy::Operator; + + /// Minimum architecture is Sm80 to support cp.async + using ArchTag = arch::Sm80; + + /// Complex transform on A operand + static ComplexTransform const kTransformA = Operator::kTransformA; + + /// Complex transform on B operand + static ComplexTransform const kTransformB = Operator::kTransformB; + + /// Internal structure exposed for introspection. 
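+  ///
+  /// Detail derives, from each iterator's thread map, how many cp.async
+  /// instructions fill one shared-memory stage and how they are split into
+  /// per-warp-tile groups, and enables staged accumulation for math operators
+  /// (e.g. tf32x3) that accumulate into a cleared temporary for accuracy.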
+ struct Detail { + + /// Number of cp.async instructions to load one stage of operand A + static int const AsyncCopyIterationsPerStageA = + IteratorA::ThreadMap::Iterations::kCount; + + /// Number of cp.async instructions to load one stage of operand B + static int const AsyncCopyIterationsPerStageB = + IteratorB::ThreadMap::Iterations::kCount; + + /// Number of stages + static int const kStages = Stages; + + /// Number of cp.async instructions to load on group of operand A + static int const kAccessesPerGroupA = + (AsyncCopyIterationsPerStageA + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations; + + /// Number of cp.async instructions to load on group of operand B + static int const kAccessesPerGroupB = + (AsyncCopyIterationsPerStageB + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations; + + // Optional staged-accumulation (e.g., tf32x3 kernels) for improved numerical + // accuracy, where each mainloop iteration first accumulates into a temporary + // set of freshly-cleared accumulators, which are subsequently added to the + // final accumulator set. + static bool const kStagedAccumulation = arch::UseStagedAccumulation::value; + }; + + private: + + + // Structure encapsulating pipeline state live from one iteration to the next + struct PipeState { + + using WarpLoadedFragmentA = typename Operator::FragmentA; + using WarpLoadedFragmentB = typename Operator::FragmentB; + using WarpTransformedFragmentA = typename Operator::TransformedFragmentA; + using WarpTransformedFragmentB = typename Operator::TransformedFragmentB; + + /// Temporary accumulator to facilitate staged-accumulation + FragmentC tmp_accum_; + + /// Pair of A fragments used to overlap shared memory loads and math instructions + WarpLoadedFragmentA warp_loaded_frag_A_[2]; + WarpTransformedFragmentA warp_transformed_frag_A_[2]; + + /// Pair of B fragments used to overlap shared memory loads and math instructions + WarpLoadedFragmentB warp_loaded_frag_B_[2]; + WarpTransformedFragmentB warp_transformed_frag_B_[2]; + }; + + + private: + + // + // Data members + // + + /// Warp-level MMA operator + Operator warp_mma_; + + /// Iterator to write threadblock-scoped tile of A operand to shared memory + SmemIteratorA smem_iterator_A_; + + /// Iterator to write threadblock-scoped tile of B operand to shared memory + SmemIteratorB smem_iterator_B_; + + /// Shared memory write stage index + int smem_write_stage_idx_; + + /// Shared memory read stage index + int smem_read_stage_idx_; + + +public: + + /// Construct from tensor references + CUTLASS_DEVICE + MmaMultistage( + ///< Shared storage needed for internal use by threadblock-scoped GEMM + typename Base::SharedStorage &shared_storage, + ///< ID within the threadblock + int thread_idx, + ///< ID of warp + int warp_idx, + ///< ID of each thread within a warp + int lane_idx + ): + Base(shared_storage, thread_idx, warp_idx, lane_idx), + smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx), + smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx), + smem_write_stage_idx_(0), + smem_read_stage_idx_(0) + { + // Compute warp location within threadblock tile by mapping the warp_id to + // three coordinates: + // _m: the warp's position within the threadblock along the M dimension + // _n: the warp's position within the threadblock along the N dimension + // _k: the warp's position within the threadblock along the K dimension + + int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN); + int warp_idx_k = warp_idx / (Base::WarpCount::kM * 
Base::WarpCount::kN); + + int warp_idx_m = warp_idx_mn % Base::WarpCount::kM; + int warp_idx_n = warp_idx_mn / Base::WarpCount::kM; + + // Add per-warp offsets in units of warp-level tiles + this->warp_tile_iterator_A_.add_tile_offset( + {warp_idx_m, Base::kWarpGemmIterations * warp_idx_k}); + this->warp_tile_iterator_B_.add_tile_offset( + {Base::kWarpGemmIterations * warp_idx_k, warp_idx_n}); + } + + /// Advance shared memory read-iterators to the next stage + CUTLASS_DEVICE + void advance_smem_read_stage() + { + ++smem_read_stage_idx_; + + if (smem_read_stage_idx_ == Base::kStages) { + // Wrap back around to the 'start' of the circular buffer in shared memory + this->warp_tile_iterator_A_.add_tile_offset({0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations}); + this->warp_tile_iterator_B_.add_tile_offset({-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations, 0}); + smem_read_stage_idx_ = 0; + } + } + + /// Advance global memory read-iterators and shared memory write-iterators to the stage + CUTLASS_DEVICE + void advance_smem_write_stage( + IteratorA &iterator_A, + IteratorB &iterator_B) + { + // Advance global iterators + iterator_A.add_tile_offset({0, 1}); + iterator_B.add_tile_offset({1, 0}); + + // Advance shared iterators + smem_iterator_A_.add_tile_offset({0, 1}); + smem_iterator_B_.add_tile_offset({1, 0}); + + // Increment shared memory write stage index + ++smem_write_stage_idx_; + + if (smem_write_stage_idx_ == Base::kStages) { + // Wrap back around to the 'start' of the circular buffer in shared memory + smem_iterator_A_.add_tile_offset({0, -Base::kStages}); + smem_iterator_B_.add_tile_offset({-Base::kStages, 0}); + smem_write_stage_idx_ = 0; + } + } + + CUTLASS_DEVICE + void copy_tiles_and_advance(IteratorA &iterator_A, IteratorB &iterator_B, + int group_start_A = 0, int group_start_B = 0) { + iterator_A.set_iteration_index(group_start_A * + IteratorA::kAccessesPerVector); + this->smem_iterator_A_.set_iteration_index(group_start_A); + + // Async Copy for operand A + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::kAccessesPerGroupA; ++j) { + if (group_start_A + j < Detail::AsyncCopyIterationsPerStageA) { + typename IteratorA::AccessType *dst_ptr = + reinterpret_cast( + this->smem_iterator_A_.get()); + + int const kSrcBytes = sizeof_bits::value * + IteratorA::ThreadMap::kElementsPerAccess / + IteratorA::kAccessesPerVector / 8; + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) { + auto gmem_ptr = iterator_A.get(); + + if (SharedMemoryClear == SharedMemoryClearOption::kZfill) { + cutlass::arch::cp_async_zfill( + dst_ptr + v, gmem_ptr, iterator_A.valid()); + } else { + cutlass::arch::cp_async( + dst_ptr + v, gmem_ptr, iterator_A.valid()); + } + + ++iterator_A; + } + + ++this->smem_iterator_A_; + } + } + + iterator_B.set_iteration_index(group_start_B * + IteratorB::kAccessesPerVector); + this->smem_iterator_B_.set_iteration_index(group_start_B); + + // Async Copy for operand B + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) { + if (group_start_B + j < Detail::AsyncCopyIterationsPerStageB) { + typename IteratorB::AccessType *dst_ptr = + reinterpret_cast( + this->smem_iterator_B_.get()); + + int const kSrcBytes = sizeof_bits::value * + IteratorB::ThreadMap::kElementsPerAccess / + IteratorB::kAccessesPerVector / 8; + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) { + auto gmem_ptr = iterator_B.get(); + + if (SharedMemoryClear == 
SharedMemoryClearOption::kZfill) { + cutlass::arch::cp_async_zfill( + dst_ptr + v, gmem_ptr, iterator_B.valid()); + } else { + cutlass::arch::cp_async( + dst_ptr + v, gmem_ptr, iterator_B.valid()); + } + + ++iterator_B; + } + ++this->smem_iterator_B_; + } + } + } + + /// GEMM prologue. Bootstrap the global->shared memory pipeline by fetching + /// the global fragments needed by the first kStages-1 threadblock mainloop iterations + CUTLASS_DEVICE + void prologue( + IteratorA &iterator_A, ///< [in|out] iterator over A operand in global memory + IteratorB &iterator_B, ///< [in|out] iterator over B operand in global memory + int &gemm_k_iterations) ///< [in|out] number of threadblock mainloop iterations remaining + { + // Issue several complete stages + CUTLASS_PRAGMA_UNROLL + for (int stage = 0; stage < Base::kStages - 1; ++stage, --gemm_k_iterations) { + + // Disable global fetching if done with global fetch iterations + iterator_A.clear_mask(gemm_k_iterations == 0); + iterator_B.clear_mask(gemm_k_iterations == 0); + + iterator_A.set_iteration_index(0); + this->smem_iterator_A_.set_iteration_index(0); + + // Async Copy for operand A + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) { + typename IteratorA::AccessType *dst_ptr = + reinterpret_cast( + this->smem_iterator_A_.get()); + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) { + int const kSrcBytes = + sizeof_bits::value * + IteratorA::ThreadMap::kElementsPerAccess / + IteratorA::kAccessesPerVector / 8; + + int src_bytes = (iterator_A.valid() ? kSrcBytes : 0); + + cutlass::arch::cp_async_zfill( + dst_ptr + v, iterator_A.get(), iterator_A.valid()); + + ++iterator_A; + } + + ++this->smem_iterator_A_; + } + + iterator_B.set_iteration_index(0); + this->smem_iterator_B_.set_iteration_index(0); + + // Async Copy for operand B + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) { + typename IteratorB::AccessType *dst_ptr = + reinterpret_cast( + this->smem_iterator_B_.get()); + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) { + int const kSrcBytes = + sizeof_bits::value * + IteratorB::ThreadMap::kElementsPerAccess / + IteratorB::kAccessesPerVector / 8; + + cutlass::arch::cp_async_zfill( + dst_ptr + v, iterator_B.get(), iterator_B.valid()); + + ++iterator_B; + } + + ++this->smem_iterator_B_; + } + + // Move to the next write stage + advance_smem_write_stage(iterator_A, iterator_B); + + // Defines the boundary of a stage of cp.async. + cutlass::arch::cp_async_fence(); + } + + // Optionally clear the remaining stages of SMEM. This is a functional requirement for + // some kernels so that all accumulator elements outside the GEMM footprint are zero. 
+ if (SharedMemoryClear == SharedMemoryClearOption::kClearLastStage) { + + /// Iterator to write threadblock-scoped tile of A operand to shared memory + SmemIteratorA last_smem_iterator_A(this->smem_iterator_A_); + typename IteratorA::AccessType zero_A; + + zero_A.clear(); + last_smem_iterator_A.set_iteration_index(0); + + // Async Copy for operand A + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) { + + typename IteratorA::AccessType *dst_ptr = + reinterpret_cast( + last_smem_iterator_A.get()); + + *dst_ptr = zero_A; + + ++last_smem_iterator_A; + } + + /// Iterator to write threadblock-scoped tile of B operand to shared memory + SmemIteratorB last_smem_iterator_B(this->smem_iterator_B_); + typename IteratorB::AccessType zero_B; + + zero_B.clear(); + last_smem_iterator_B.set_iteration_index(0); + + // Async Copy for operand B + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) { + + typename IteratorB::AccessType *dst_ptr = + reinterpret_cast( + last_smem_iterator_B.get()); + + *dst_ptr = zero_B; + + ++last_smem_iterator_B; + } + } + } + + + /// Wait until we have at least one completed global fetch stage + CUTLASS_DEVICE + void gmem_wait() + { + // Wait until we have at least one committed global fetch stage. (#uncommitted = Base::kStages - 1 - #committed) + cutlass::arch::cp_async_wait(); + __syncthreads(); + } + + + /// Perform a threadblock mainloop iteration of matrix multiply-accumulate + CUTLASS_DEVICE + void mac_loop_iter( + PipeState &pipe_state, ///< [in|out] loop-carried pipeline state + FragmentC &accum, ///< [in|out] destination accumulator tile + IteratorA &iterator_A, ///< [in|out] iterator over A operand in global memory + IteratorB &iterator_B, ///< [in|out] iterator over B operand in global memory + int &gemm_k_iterations) ///< [in|out] number of threadblock mainloop iterations remaining + { + // Unroll the warp-level MMA tiles of a threadblock's mainloop iteration + CUTLASS_PRAGMA_UNROLL + for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) { + + // Load the next warp-tile's A fragment from shared memory + this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); + this->warp_tile_iterator_A_.load(pipe_state.warp_loaded_frag_A_[(warp_mma_k + 1) % 2]); + ++this->warp_tile_iterator_A_; + + // Load the next warp-tile's B fragment from shared memory + this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); + this->warp_tile_iterator_B_.load(pipe_state.warp_loaded_frag_B_[(warp_mma_k + 1) % 2]); + ++this->warp_tile_iterator_B_; + + // Except for the first warp-tile, all warp-tiles convert their incoming shared memory fragments as necessary + if (warp_mma_k > 0) { + warp_mma_.transform( + pipe_state.warp_transformed_frag_A_[warp_mma_k % 2], + pipe_state.warp_transformed_frag_B_[warp_mma_k % 2], + pipe_state.warp_loaded_frag_A_[warp_mma_k % 2], + pipe_state.warp_loaded_frag_B_[warp_mma_k % 2]); + } + + // Execute the current warp-tile of MMA operations + if (Detail::kStagedAccumulation) { + warp_mma_( + pipe_state.tmp_accum_, + pipe_state.warp_transformed_frag_A_[warp_mma_k % 2], + pipe_state.warp_transformed_frag_B_[warp_mma_k % 2], + pipe_state.tmp_accum_ + ); + + if (warp_mma_k == 0) { + plus plus_accum; + accum = plus_accum(accum, pipe_state.tmp_accum_); + pipe_state.tmp_accum_.clear(); + } + } else { + warp_mma_( + accum, + pipe_state.warp_transformed_frag_A_[warp_mma_k % 2], + 
pipe_state.warp_transformed_frag_B_[warp_mma_k % 2], + accum + ); + } + + // Except for the last warp-tile, all warp-tiles issue their share of + // global->shared fragment copies + if (warp_mma_k < Base::kWarpGemmIterations - 1) { + + int group_start_iteration_A, group_start_iteration_B; + group_start_iteration_A = warp_mma_k * Detail::kAccessesPerGroupA; + group_start_iteration_B = warp_mma_k * Detail::kAccessesPerGroupB; + + copy_tiles_and_advance( + iterator_A, + iterator_B, + group_start_iteration_A, + group_start_iteration_B); + } + + // The second-to-last warp-tile also: + // - performs the last warp-tile's share of global->shared fragment copies + // - moves to the next global fetch stage + if (warp_mma_k + 2 == Base::kWarpGemmIterations) { + + // Performs the last warp-tile's share of global->shared fragment copies + int group_start_iteration_A = (warp_mma_k + 1) * Detail::kAccessesPerGroupA; + int group_start_iteration_B = (warp_mma_k + 1) * Detail::kAccessesPerGroupB; + + copy_tiles_and_advance( + iterator_A, + iterator_B, + group_start_iteration_A, + group_start_iteration_B); + + // Inserts a memory fence between stages of cp.async instructions. + cutlass::arch::cp_async_fence(); + + // Wait until we have at least one completed global fetch stage + gmem_wait(); + + // Move to the next global fetch stage + advance_smem_write_stage(iterator_A, iterator_B); + advance_smem_read_stage(); + + // Disable global fetching when done with global fetch iterations + --gemm_k_iterations; + iterator_A.clear_mask(gemm_k_iterations == 0); + iterator_B.clear_mask(gemm_k_iterations == 0); + } + + // The last warp-tile also converts the shared memory fragments used by + // the first warp-tile of the next iteration, if necessary (so we can + // immediately start issuing MMA instructions at the top of the loop ) + if (warp_mma_k + 1 == Base::kWarpGemmIterations) { + + warp_mma_.transform( + pipe_state.warp_transformed_frag_A_[(warp_mma_k + 1) % 2], + pipe_state.warp_transformed_frag_B_[(warp_mma_k + 1) % 2], + pipe_state.warp_loaded_frag_A_[(warp_mma_k + 1) % 2], + pipe_state.warp_loaded_frag_B_[(warp_mma_k + 1) % 2]); + } + + } + } + + + /// Perform the specified number of threadblock mainloop iterations of matrix + /// multiply-accumulate. Assumes prologue has been initiated. 
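+  /// The loop runs until gemm_k_iterations reaches -(kStages - 1) so that the
+  /// stages already in flight from the prologue are consumed; when staged
+  /// accumulation is enabled, the temporary accumulator is folded into 'accum'
+  /// after the loop.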
+ CUTLASS_DEVICE + void gemm_iters( + int gemm_k_iterations, ///< number of threadblock mainloop iterations + FragmentC &accum, ///< [in|out] accumulator tile + IteratorA &iterator_A, ///< [in|out] iterator over A operand in global memory + IteratorB &iterator_B) ///< [in|out] iterator over B operand in global memory + { + PipeState pipe_state; + + // Disable global fetching if done with global fetch iterations + iterator_A.clear_mask(gemm_k_iterations == 0); + iterator_B.clear_mask(gemm_k_iterations == 0); + + // Load first warp-tile's A fragment from shared memory + this->warp_tile_iterator_A_.set_kgroup_index(0); + this->warp_tile_iterator_A_.load(pipe_state.warp_loaded_frag_A_[0]); + ++this->warp_tile_iterator_A_; + + // Load first warp-tile's B fragment from shared memory + this->warp_tile_iterator_B_.set_kgroup_index(0); + this->warp_tile_iterator_B_.load(pipe_state.warp_loaded_frag_B_[0]); + ++this->warp_tile_iterator_B_; + + // Transform, if necessary, the first warp-tile's shared memory fragments + warp_mma_.transform( + pipe_state.warp_transformed_frag_A_[0], + pipe_state.warp_transformed_frag_B_[0], + pipe_state.warp_loaded_frag_A_[0], + pipe_state.warp_loaded_frag_B_[0]); + + if (Detail::kStagedAccumulation) { + pipe_state.tmp_accum_.clear(); + } + + // Mainloop + CUTLASS_GEMM_LOOP + for (; gemm_k_iterations > (-Base::kStages + 1);) { + mac_loop_iter( + pipe_state, + accum, + iterator_A, + iterator_B, + gemm_k_iterations); + } + + if (Detail::kStagedAccumulation) { + plus plus_accum; + accum = plus_accum(accum, pipe_state.tmp_accum_); + } + + // Commit and drain all pending and predicated cp.async pnz from the GEMM mainloop + cutlass::arch::cp_async_fence(); + cutlass::arch::cp_async_wait<0>(); + __syncthreads(); + + } + + + /// Prepares the class for another prologue. + CUTLASS_DEVICE + void wind_down() + { + // Catch-up the smem-read iterator to the smem-write iterator (so this class can be reused for another tile's prologue) + + // First, increment remaining warp tiles to get to the next full stage. (Ideally we would + // just decrement one tile, but not all iterators implement --() decrement.) 
+ #pragma unroll + for (int warp_mma_k = 1; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) + { + this->warp_tile_iterator_A_.set_kgroup_index(warp_mma_k); + this->warp_tile_iterator_B_.set_kgroup_index(warp_mma_k); + + ++this->warp_tile_iterator_A_; + ++this->warp_tile_iterator_B_; + } + smem_read_stage_idx_++; + + // Then wrap back two full stages (one for the tile advancing we just did, and one to catch the write iterators) + static const int kStageIters = Policy::kPartitionsK * Base::kWarpGemmIterations; + if (smem_read_stage_idx_ > 1) + { + this->warp_tile_iterator_A_.add_tile_offset({0, (-2 * kStageIters)}); + this->warp_tile_iterator_B_.add_tile_offset({(-2 * kStageIters), 0}); + } + else + { + this->warp_tile_iterator_A_.add_tile_offset({0, ((Base::kStages - 2) * kStageIters)}); + this->warp_tile_iterator_B_.add_tile_offset({((Base::kStages - 2) * kStageIters), 0}); + } + smem_read_stage_idx_ = smem_write_stage_idx_; + } + + + /// Perform a threadblock-scoped matrix multiply-accumulate + CUTLASS_DEVICE + void operator()( + ///< problem size of GEMM + int gemm_k_iterations, + ///< destination accumulator tile + FragmentC &accum, + ///< iterator over A operand in global memory + IteratorA iterator_A, + ///< iterator over B operand in global memory + IteratorB iterator_B, + ///< initial value of accumulator + FragmentC const &src_accum) { + + // Prologue (start fetching iterations of global fragments into shared memory) + prologue(iterator_A, iterator_B, gemm_k_iterations); + + // Wait until we have at least one completed global fetch stage + gmem_wait(); + + // Initialize destination accumulators with source accumulators + accum = src_accum; + + // Perform the MAC-iterations + gemm_iters(gemm_k_iterations, accum, iterator_A, iterator_B); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_pipelined.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_pipelined.h new file mode 100644 index 0000000000000000000000000000000000000000..8ada21cd4b8b4976565741132495822bdaeef69c --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_pipelined.h @@ -0,0 +1,439 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Template for a double-buffered threadblock-scoped GEMM kernel. +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/array.h" +#include "cutlass/aligned_buffer.h" +#include "cutlass/numeric_conversion.h" + +#include "cutlass/numeric_types.h" +#include "cutlass/matrix_shape.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/threadblock/mma_base.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace threadblock { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions. +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + /// Iterates over tiles of A operand in global memory + // (concept: ReadableTileIterator | ForwardTileIterator | MaskedTileIterator) + typename IteratorA_, + /// Iterates over tiles of A operand in shared memory + /// (concept: WriteableTileIterator | RandomAccessTileIterator) + typename SmemIteratorA_, + /// Iterates over tiles of B operand in global memory + // (concept: ReadableTileIterator | ForwardTileIterator | MaskedTileIterator) + typename IteratorB_, + /// Iterates over tiles of B operand in shared memory + /// (concept: WriteableTileIterator | RandomAccessTileIterator) + typename SmemIteratorB_, + /// Data type of accumulator matrix + typename ElementC_, + /// Data type of accumulator matrix + typename LayoutC_, + /// Policy describing tuning details (concept: MmaPolicy) + typename Policy_, + /// Transformation applied to A operand + typename TransformA_ = NumericArrayConverter< + typename SmemIteratorA_::Element, + typename IteratorA_::Element, + IteratorA_::Fragment::kElements>, + /// + /// Transformation applied to B operand + typename TransformB_ = NumericArrayConverter< + typename SmemIteratorB_::Element, + typename IteratorB_::Element, + IteratorB_::Fragment::kElements>, + /// Used for partial specialization + typename Enable = bool +> +class MmaPipelined : public MmaBase { +public: + + ///< Base class + using Base = MmaBase; + + using Shape = Shape_; ///< Size of the Gemm problem - concept: gemm::GemmShape<> + using IteratorA = IteratorA_; ///< Iterates over tiles of A operand in global memory + using IteratorB = IteratorB_; ///< Iterates over tiles of B operand in global memory + using ElementC = ElementC_; ///< Data type of accumulator matrix + using LayoutC = LayoutC_; ///< Layout of accumulator matrix + using Policy = Policy_; ///< Policy 
describing tuning details + + using SmemIteratorA = SmemIteratorA_; + using SmemIteratorB = SmemIteratorB_; + + using TransformA = TransformA_; + using TransformB = TransformB_; + + // + // Dependent types + // + + /// Fragment of operand A loaded from global memory + using FragmentA = typename IteratorA::Fragment; + + /// Fragment of operand B loaded from global memory + using FragmentB = typename IteratorB::Fragment; + + /// Fragment of accumulator tile + using FragmentC = typename Policy::Operator::FragmentC; + + /// Warp-level Mma + using Operator = typename Policy::Operator; + + /// Obtain the arch tag from the warp-level operator + using ArchTag = typename Policy::Operator::ArchTag; + + /// Complex transform on A operand + static ComplexTransform const kTransformA = Operator::kTransformA; + + /// Complex transform on B operand + static ComplexTransform const kTransformB = Operator::kTransformB; + + // staticaly assert kStages for MmaPipelined is two (Double-buffered pipeline) + static_assert((Base::kStages==2), "MmaPipelined requires kStages set to value 2"); + +protected: + + // + // Data members + // + + /// Warp-level MMA operator + Operator warp_mma; + + /// Iterator to write threadblock-scoped tile of A operand to shared memory + SmemIteratorA smem_iterator_A_; + + /// Iterator to write threadblock-scoped tile of B operand to shared memory + SmemIteratorB smem_iterator_B_; + + ///< transformation applied to A fragment + TransformA transform_A_; + + ///< transformation applied to B fragment + TransformB transform_B_; + + /// Shared memory write stage index + int smem_write_stage_idx; + +public: + + /// Construct from tensor references + CUTLASS_DEVICE + MmaPipelined( + typename Base::SharedStorage &shared_storage, ///< Shared storage needed for internal use by threadblock-scoped GEMM + int thread_idx, ///< ID within the threadblock + int warp_idx, ///< ID of warp + int lane_idx, ///< ID of each thread within a warp + TransformA transform_A = TransformA(), ///< transformation applied to A fragment + TransformB transform_B = TransformB() ///< transformation applied to B fragment + ): + Base(shared_storage, thread_idx, warp_idx, lane_idx), + smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx), + smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx), + transform_A_(transform_A), + transform_B_(transform_B), + smem_write_stage_idx(0) + { + + // Compute warp location within threadblock tile by mapping the warp_id to + // three coordinates: + // _m: the warp's position within the threadblock along the M dimension + // _n: the warp's position within the threadblock along the N dimension + // _k: the warp's position within the threadblock along the K dimension + + int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN); + int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN); + + int warp_idx_m = warp_idx_mn % Base::WarpCount::kM; + int warp_idx_n = warp_idx_mn / Base::WarpCount::kM; + + // Add per-warp offsets in units of warp-level tiles + this->warp_tile_iterator_A_.add_tile_offset({warp_idx_m, Base::kWarpGemmIterations * warp_idx_k}); + this->warp_tile_iterator_B_.add_tile_offset({Base::kWarpGemmIterations * warp_idx_k, warp_idx_n}); + } + + + /// Advance shared memory write-iterators to the next stage + CUTLASS_DEVICE + void advance_smem_write_stage() + { + ++this->smem_iterator_A_; + ++this->smem_iterator_B_; + + // Add negative offsets to return iterators to the 'start' of the circular buffer in shared memory + if 
(smem_write_stage_idx == 1) { + this->smem_iterator_A_.add_tile_offset({0, -Base::kStages}); + this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0}); + } + + smem_write_stage_idx ^= 1; + } + + /// Advance shared memory read- and write-iterators to the next stage + CUTLASS_DEVICE + void advance_smem_stages() + { + ++this->smem_iterator_A_; + ++this->smem_iterator_B_; + + // Add negative offsets to return iterators to the 'start' of the circular buffer in shared memory + if (smem_write_stage_idx == 1) { + // wrap write stage + this->smem_iterator_A_.add_tile_offset({0, -Base::kStages}); + this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0}); + } + else + { + // wrap read stage + this->warp_tile_iterator_A_.add_tile_offset( + {0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations}); + this->warp_tile_iterator_B_.add_tile_offset( + {-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations, 0}); + } + + smem_write_stage_idx ^= 1; + } + + + /// GEMM prologue. Bootstrap the global->shared memory pipeline by fetching + /// the global fragments needed by the first kStages-1 threadblock mainloop iterations + CUTLASS_DEVICE + void prologue( + IteratorA &iterator_A, ///< [in|out] iterator over A operand in global memory + IteratorB &iterator_B, ///< [in|out] iterator over B operand in global memory + int &gemm_k_iterations) ///< [in|out] number of threadblock mainloop iterations remaining + { + // The last kblock is loaded in the prolog + + // Load A fragment from global A + FragmentA tb_frag_A; + tb_frag_A.clear(); + iterator_A.load(tb_frag_A); + ++iterator_A; + + // Load B fragment from global B + FragmentB tb_frag_B; + tb_frag_B.clear(); + iterator_B.load(tb_frag_B); + ++iterator_B; + + // Store A and B fragments to shared + this->smem_iterator_A_.store(transform_A_(tb_frag_A)); + this->smem_iterator_B_.store(transform_B_(tb_frag_B)); + + // Advance write stage + advance_smem_write_stage(); + } + + /// Wait until we have at least one completed global fetch stage + CUTLASS_DEVICE + void gmem_wait() + { + __syncthreads(); + } + + + /// Perform the specified number of threadblock mainloop iterations of matrix + /// multiply-accumulate. Assumes prologue has been initiated. + CUTLASS_DEVICE + void gemm_iters( + int gemm_k_iterations, ///< number of threadblock mainloop iterations + FragmentC &accum, ///< [in|out] accumulator tile + IteratorA &iterator_A, ///< [in|out] iterator over A operand in global memory + IteratorB &iterator_B) ///< [in|out] iterator over B operand in global memory + { + using WarpFragmentA = typename Operator::FragmentA; + using WarpFragmentB = typename Operator::FragmentB; + + // Pair of fragments used to overlap shared memory loads and math instructions + WarpFragmentA warp_frag_A[2]; + WarpFragmentB warp_frag_B[2]; + + // Load A fragment from shared A + this->warp_tile_iterator_A_.set_kgroup_index(0); + this->warp_tile_iterator_A_.load(warp_frag_A[0]); + ++this->warp_tile_iterator_A_; + + // Load B fragment from shared B + this->warp_tile_iterator_B_.set_kgroup_index(0); + this->warp_tile_iterator_B_.load(warp_frag_B[0]); + ++this->warp_tile_iterator_B_; + + // Pair of fragments used to overlap global memory loads and math instructions; + FragmentA tb_frag_A; + FragmentB tb_frag_B; + + // Avoid reading out of bounds + iterator_A.clear_mask(gemm_k_iterations <= 1); + iterator_B.clear_mask(gemm_k_iterations <= 1); + + // + // Mainloop + // + + // Note: The main loop does not support Base::kWarpGemmIterations == 2. 
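+    //
+    // Each iteration double-buffers one threadblock tile: on the last warp-level
+    // k-group the prefetched global fragments are stored to shared memory and the
+    // smem read/write stages advance; on the first k-group the next global
+    // fragments are fetched, with the load masks cleared once the tail is reached.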
+ CUTLASS_GEMM_LOOP + for (; gemm_k_iterations > 0; --gemm_k_iterations) { + // + // Loop over GEMM K dimension + // + + CUTLASS_PRAGMA_UNROLL + for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) { + + // Load warp-level tiles from shared memory, wrapping to k offset if this is the last group + // as the case may be. + + if (warp_mma_k == Base::kWarpGemmIterations - 1) { + + // Write fragments to shared memory + this->smem_iterator_A_.store(transform_A_(tb_frag_A)); + + this->smem_iterator_B_.store(transform_B_(tb_frag_B)); + + // Wait until we have at least one completed global fetch stage + gmem_wait(); + + // Advance smem read and write stages + advance_smem_stages(); + } + + this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); + this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); + + this->warp_tile_iterator_A_.load(warp_frag_A[(warp_mma_k + 1) % 2]); + this->warp_tile_iterator_B_.load(warp_frag_B[(warp_mma_k + 1) % 2]); + + ++this->warp_tile_iterator_A_; + ++this->warp_tile_iterator_B_; + + if (warp_mma_k == 0) { + + // Load fragment from global A + tb_frag_A.clear(); + iterator_A.load(tb_frag_A); + ++iterator_A; + + // Load fragment from global B + tb_frag_B.clear(); + iterator_B.load(tb_frag_B); + ++iterator_B; + + // Avoid reading out of bounds if this was the last loop iteration + iterator_A.clear_mask(gemm_k_iterations <= 2); + iterator_B.clear_mask(gemm_k_iterations <= 2); + } + + warp_mma( + accum, + warp_frag_A[warp_mma_k % 2], + warp_frag_B[warp_mma_k % 2], + accum); + } + } + + } + + + /// Prepares the class for another prologue. + CUTLASS_DEVICE + void wind_down() + { + // First, increment remaining warp tiles to catch it up with the write stage. 
+ #pragma unroll + for (int warp_mma_k = 1; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) + { + this->warp_tile_iterator_A_.set_kgroup_index(warp_mma_k); + this->warp_tile_iterator_B_.set_kgroup_index(warp_mma_k); + + ++this->warp_tile_iterator_A_; + ++this->warp_tile_iterator_B_; + } + + // If we bumped the read iterators to the end of the circular buffer, wrap them around to + // align them with the write iterators + if (smem_write_stage_idx == 0) + { + this->warp_tile_iterator_A_.add_tile_offset( + {0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations}); + this->warp_tile_iterator_B_.add_tile_offset( + {-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations, 0}); + } + } + + /// Perform a threadblock-scoped matrix multiply-accumulate + CUTLASS_DEVICE + void operator()( + int gemm_k_iterations, ///< number of iterations of the mainloop + FragmentC &accum, ///< destination accumulator tile + IteratorA iterator_A, ///< iterator over A operand in global memory + IteratorB iterator_B, ///< iterator over B operand in global memory + FragmentC const &src_accum) ///< source accumulator tile + { + // Prologue + prologue(iterator_A, iterator_B, gemm_k_iterations); + + // Wait until we have at least one completed global fetch stage + gmem_wait(); + + // Perform accumulation in the 'd' output operand + accum = src_accum; + + // Perform the MAC-iterations + gemm_iters(gemm_k_iterations, accum, iterator_A, iterator_B); + } + +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_planar_complex_base.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_planar_complex_base.h new file mode 100644 index 0000000000000000000000000000000000000000..d21600ea25ced7927afce24e3825cc9e0d653bc3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_planar_complex_base.h @@ -0,0 +1,208 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Template for a double-buffered threadblock-scoped GEMM kernel. +*/ + +#pragma once + +#include "cutlass/aligned_buffer.h" +#include "cutlass/arch/memory.h" +#include "cutlass/array.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_shape.h" +#include "cutlass/numeric_types.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +/// Structure to compute the matrix product targeting CUDA cores and SIMT math +/// instructions. +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + /// Policy describing tuning details (concept: MmaPolicy) + typename Policy_, + /// Number of stages, + int Stages, + /// Used for partial specialization + typename Enable = bool> +class MmaPlanarComplexBase { + public: + ///< Size of the Gemm problem - concept: gemm::GemmShape<> + using Shape = Shape_; + + ///< Policy describing tuning details + using Policy = Policy_; + + // + // Dependent types + // + + /// Warp-level Mma + using Operator = typename Policy::Operator; + + /// Shape describing the overall GEMM computed from shared memory + /// by each warp. 
+ using WarpGemm = typename Policy::Operator::Shape; + + /// Shape describing the number of warps filling the CTA + using WarpCount = GemmShape; + + /// Number of warp-level GEMM oeprations + static int const kWarpGemmIterations = + (WarpGemm::kK / Operator::Policy::MmaShape::kK); + + /// Number of stages + static int const kStages = Stages; + + /// Tensor reference to the A operand + using TensorRefA = TensorRef; + + /// Tensor reference to the B operand + using TensorRefB = TensorRef; + + // + // Nested structs + // + + /// Shared storage object needed by threadblock-scoped GEMM + class SharedStorage { + public: + // + // Type definitions + // + + /// Shape of the A matrix operand in shared memory + using ShapeA = MatrixShape; + + /// Stride to the imaginary part of the A operand + static int const kImaginaryStrideA = ShapeA::kCount; + + /// Shape of the B matrix operand in shared memory + using ShapeB = + MatrixShape; + + /// Stride to the imaginary part of the A operand + static int const kImaginaryStrideB = ShapeB::kCount; + + public: + // + // Data members + // + + /// Buffer for A operand + AlignedBuffer operand_A; + + /// Buffer for B operand + AlignedBuffer operand_B; + + public: + + // + // Methods + // + + /// Returns a layout object for the A matrix + CUTLASS_DEVICE + static typename Operator::LayoutA LayoutA() { + return Operator::LayoutA::packed({ShapeA::kRow, ShapeA::kColumn}); + } + + /// Returns a layout object for the B matrix + CUTLASS_HOST_DEVICE + static typename Operator::LayoutB LayoutB() { + return Operator::LayoutB::packed({ShapeB::kRow, ShapeB::kColumn}); + } + + /// Returns a TensorRef to the A operand + CUTLASS_HOST_DEVICE + TensorRefA operand_A_ref() { + return TensorRefA{operand_A.data(), LayoutA()}; + } + + /// Returns a TensorRef to the B operand + CUTLASS_HOST_DEVICE + TensorRefB operand_B_ref() { + return TensorRefB{operand_B.data(), LayoutB()}; + } + }; + + protected: + + // + // Data members + // + + /// Iterator to load a warp-scoped tile of A operand from shared memory + typename Operator::IteratorA warp_tile_iterator_A_; + + /// Iterator to load a warp-scoped tile of B operand from shared memory + typename Operator::IteratorB warp_tile_iterator_B_; + +public: + + /// Construct from tensor references + CUTLASS_DEVICE + MmaPlanarComplexBase( + ///< Shared storage needed for internal use by threadblock-scoped GEMM + SharedStorage &shared_storage, + ///< ID within the threadblock + int thread_idx, + ///< ID of warp + int warp_idx, + ///< ID of each thread within a warp + int lane_idx + ): + warp_tile_iterator_A_(shared_storage.operand_A_ref(), lane_idx), + warp_tile_iterator_B_(shared_storage.operand_B_ref(), lane_idx) { + + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_planar_complex_multistage.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_planar_complex_multistage.h new file mode 100644 index 0000000000000000000000000000000000000000..9ff59893870617887e781fca0b622687931b98c3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_planar_complex_multistage.h @@ -0,0 +1,646 @@ 
+/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Template for a double-buffered threadblock-scoped GEMM kernel. +*/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/aligned_buffer.h" +#include "cutlass/arch/memory.h" +#include "cutlass/array.h" +#include "cutlass/array_planar_complex.h" +#include "cutlass/functional.h" +#include "cutlass/matrix_shape.h" +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/threadblock/mma_planar_complex_base.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace threadblock { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Structure to compute the matrix product targeting CUDA cores and SIMT math +/// instructions. 
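+/// Planar complex note (illustrative): A and B are kept as separate real and
+/// imaginary planes in shared memory, offset by SharedStorage::kImaginaryStrideA/B,
+/// and the accumulator is an ArrayPlanarComplex with .real/.imag parts. The
+/// warp-level update decomposes the complex product into four real MMAs,
+///
+///   (a_r + i a_i)(b_r + i b_i) = (a_r b_r - a_i b_i) + i (a_r b_i + a_i b_r),
+///
+/// with operand conjugation (ComplexTransform) folded in by negating the
+/// corresponding B fragment.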
+template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + /// Iterates over tiles of A operand in global memory + // (concept: ReadableTileIterator | ForwardTileIterator | + // MaskedTileIterator) + typename IteratorA_, + /// Iterates over tiles of A operand in shared memory + /// (concept: WriteableTileIterator | RandomAccessTileIterator) + typename SmemIteratorA_, + /// Cache operation for operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Iterates over tiles of B operand in global memory + // (concept: ReadableTileIterator | ForwardTileIterator | + // MaskedTileIterator) + typename IteratorB_, + /// Iterates over tiles of B operand in shared memory + /// (concept: WriteableTileIterator | RandomAccessTileIterator) + typename SmemIteratorB_, + /// Cache operation for operand B + cutlass::arch::CacheOperation::Kind CacheOpB, + /// Data type of accumulator matrix + typename ElementC_, + /// Data type of accumulator matrix + typename LayoutC_, + /// Policy describing tuning details (concept: MmaPolicy) + typename Policy_, + /// Number of stages, + int Stages, + /// Transformation applied to A + ComplexTransform TransformA = ComplexTransform::kNone, + /// Transformation applied to B + ComplexTransform TransformB = ComplexTransform::kNone +> +class MmaPlanarComplexMultistage : + public MmaPlanarComplexBase { +public: + ///< Base class + using Base = MmaPlanarComplexBase; + + ///< Size of the Gemm problem - concept: gemm::GemmShape<> + using Shape = Shape_; + + ///< Iterates over tiles of A operand in global memory + using IteratorA = IteratorA_; + + ///< Iterates over tiles of B operand in global memory + using IteratorB = IteratorB_; + + ///< Data type of accumulator matrix + using ElementC = ElementC_; + + ///< Layout of accumulator matrix + using LayoutC = LayoutC_; + + ///< Policy describing tuning details + using Policy = Policy_; + + ///< Archtecture tag + using ArchTag = arch::Sm80; + + using SmemIteratorA = SmemIteratorA_; + using SmemIteratorB = SmemIteratorB_; + + static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; + + /// Transformation applied to A + static ComplexTransform const kTransformA = TransformA; + + /// Transformation applied to B + static ComplexTransform const kTransformB = TransformB; + + // + // Dependent types + // + + /// Fragment of accumulator tile + using FragmentC = ArrayPlanarComplex< + typename Policy::Operator::FragmentC::Element, + Policy::Operator::FragmentC::kElements + >; + + /// Warp-level Mma + using Operator = typename Policy::Operator; + + /// Internal structure exposed for introspection. 
+ struct Detail { + + static_assert(Base::kWarpGemmIterations > 1, + "The pipelined structure requires at least two warp-level " + "GEMM operations."); + + /// Number of cp.async instructions to load one stage of operand A + static int const TBLoadIterationsA = + IteratorA::ThreadMap::Iterations::kCount; + + /// Number of cp.async instructions to load one stage of operand B + static int const TBLoadIterationsB = + IteratorB::ThreadMap::Iterations::kCount; + + /// Number of stages + static int const kStages = Stages; + + static int const kAccessesPerGroupA = + (TBLoadIterationsA + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations; + + static int const kAccessesPerGroupB = + (TBLoadIterationsB + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations; + }; + + private: + + using WarpFragmentA = typename Operator::FragmentA; + using WarpFragmentB = typename Operator::FragmentB; + + private: + + // + // Data members + // + + /// Iterator to write threadblock-scoped tile of A operand to shared memory + SmemIteratorA smem_iterator_A_; + + /// Iterator to write threadblock-scoped tile of B operand to shared memory + SmemIteratorB smem_iterator_B_; + +public: + + /// Construct from tensor references + CUTLASS_DEVICE + MmaPlanarComplexMultistage( + ///< Shared storage needed for internal use by threadblock-scoped GEMM + typename Base::SharedStorage &shared_storage, + ///< ID within the threadblock + int thread_idx, + ///< ID of warp + int warp_idx, + ///< ID of each thread within a warp + int lane_idx + ): + Base(shared_storage, thread_idx, warp_idx, lane_idx), + smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx), + smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx) + { + // Compute warp location within threadblock tile by mapping the warp_id to + // three coordinates: + // _m: the warp's position within the threadblock along the M dimension + // _n: the warp's position within the threadblock along the N dimension + // _k: the warp's position within the threadblock along the K dimension + + int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN); + int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN); + + int warp_idx_m = warp_idx_mn % Base::WarpCount::kM; + int warp_idx_n = warp_idx_mn / Base::WarpCount::kM; + + // Add per-warp offsets in units of warp-level tiles + this->warp_tile_iterator_A_.add_tile_offset({warp_idx_m, Base::kWarpGemmIterations * warp_idx_k}); + this->warp_tile_iterator_B_.add_tile_offset({Base::kWarpGemmIterations * warp_idx_k, warp_idx_n}); + } + +private: + + CUTLASS_DEVICE + void copy_tiles_and_advance( + IteratorA &iterator_A_real, + IteratorA &iterator_A_imag, + + IteratorB &iterator_B_real, + IteratorB &iterator_B_imag, + + int group_start_A = 0, + int group_start_B = 0) { + + iterator_A_real.set_iteration_index(group_start_A * IteratorA::kAccessesPerVector); + iterator_A_imag.set_iteration_index(group_start_A * IteratorA::kAccessesPerVector); + this->smem_iterator_A_.set_iteration_index(group_start_A); + + // Load for operand A + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::kAccessesPerGroupA; ++j) { + + typename IteratorA::AccessType *dst_ptr = + reinterpret_cast(this->smem_iterator_A_.get()); + + int const kSrcBytes = + sizeof_bits::value * + IteratorA::ThreadMap::kElementsPerAccess / IteratorA::kAccessesPerVector / 8; + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) { + + auto gmem_ptr_real = iterator_A_real.get(); + auto gmem_ptr_imag = 
iterator_A_imag.get(); + + bool pred_guard = iterator_A_real.valid(); + cutlass::arch::cp_async( + dst_ptr + v, + gmem_ptr_real, + pred_guard); + cutlass::arch::cp_async( + dst_ptr + v + (Base::SharedStorage::kImaginaryStrideA / IteratorA::ThreadMap::kElementsPerAccess), + reinterpret_cast(gmem_ptr_imag), + pred_guard); + + ++iterator_A_real; + ++iterator_A_imag; + } + + ++this->smem_iterator_A_; + } + + iterator_B_real.set_iteration_index(group_start_B * IteratorB::kAccessesPerVector); + iterator_B_imag.set_iteration_index(group_start_B * IteratorB::kAccessesPerVector); + this->smem_iterator_B_.set_iteration_index(group_start_B); + + // Load for operand B + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) { + typename IteratorB::AccessType *dst_ptr = + reinterpret_cast(this->smem_iterator_B_.get()); + + int const kSrcBytes = + sizeof_bits::value * + IteratorB::ThreadMap::kElementsPerAccess / IteratorB::kAccessesPerVector / 8; + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) { + auto gmem_ptr_real = iterator_B_real.get(); + auto gmem_ptr_imag = iterator_B_imag.get(); + + bool pred_guard = iterator_B_real.valid(); + cutlass::arch::cp_async( + dst_ptr + v, + gmem_ptr_real, + pred_guard); + cutlass::arch::cp_async( + dst_ptr + v + (Base::SharedStorage::kImaginaryStrideB / IteratorB::ThreadMap::kElementsPerAccess), + reinterpret_cast(gmem_ptr_imag), + pred_guard); + + ++iterator_B_real; + ++iterator_B_imag; + } + ++this->smem_iterator_B_; + } + } + + CUTLASS_DEVICE + void warp_mma_planar_complex( + Operator & warp_mma, + FragmentC &accum, + WarpFragmentA const & real_A, + WarpFragmentA const & imag_A, + WarpFragmentB const & real_B, + WarpFragmentB const & imag_B) { + + cutlass::negate> neg_op_B; + + WarpFragmentB neg_real_B = neg_op_B(real_B); + WarpFragmentB neg_imag_B = neg_op_B(imag_B); + + warp_mma(accum.real, real_A, real_B, accum.real); + + if (kTransformB == ComplexTransform::kNone) { + warp_mma(accum.imag, real_A, imag_B, accum.imag); + } + else { + warp_mma(accum.imag, real_A, neg_imag_B, accum.imag); + } + + if (kTransformA == ComplexTransform::kNone) { + warp_mma(accum.imag, imag_A, real_B, accum.imag); + } + else { + warp_mma(accum.imag, imag_A, neg_real_B, accum.imag); + } + + if (kTransformA == ComplexTransform::kNone ^ kTransformB == ComplexTransform::kNone) { + warp_mma(accum.real, imag_A, imag_B, accum.real); + } + else { + warp_mma(accum.real, imag_A, neg_imag_B, accum.real); + } + } + +public: + + /// Perform a threadblock-scoped matrix multiply-accumulate + CUTLASS_DEVICE + void operator()( + ///< problem size of GEMM + int gemm_k_iterations, + ///< destination accumulator tile + FragmentC &accum, + ///< iterator over A operand in global memory + IteratorA iterator_A_real, + ///< iterator over A operand in global memory + IteratorA iterator_A_imag, + ///< iterator over B operand in global memory + IteratorB iterator_B_real, + ///< iterator over B operand in global memory + IteratorB iterator_B_imag, + ///< initial value of accumulator + FragmentC const &src_accum) { + + // + // Prologue + // + + // Issue several complete stages + CUTLASS_PRAGMA_UNROLL + for (int stage = 0; stage < Base::kStages - 1; + ++stage, --gemm_k_iterations) { + + iterator_A_real.clear_mask(gemm_k_iterations == 0); + iterator_A_imag.clear_mask(gemm_k_iterations == 0); + iterator_B_real.clear_mask(gemm_k_iterations == 0); + iterator_B_imag.clear_mask(gemm_k_iterations == 0); + + iterator_A_real.set_iteration_index(0); + 
iterator_A_imag.set_iteration_index(0); + + this->smem_iterator_A_.set_iteration_index(0); + + // Load for operand A + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::TBLoadIterationsA; ++j) { + + typename IteratorA::AccessType *dst_ptr = + reinterpret_cast(this->smem_iterator_A_.get()); + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) { + + int const kSrcBytes = + sizeof_bits::value * + IteratorA::ThreadMap::kElementsPerAccess / IteratorA::kAccessesPerVector / 8; + + bool pred_guard = iterator_A_real.valid(); + + auto src_ptr_real = iterator_A_real.get(); + auto src_ptr_imag = iterator_A_imag.get(); + + cutlass::arch::cp_async_zfill( + dst_ptr + v, src_ptr_real, pred_guard); + + cutlass::arch::cp_async_zfill( + dst_ptr + v + + Base::SharedStorage::kImaginaryStrideA / + IteratorA::ThreadMap::kElementsPerAccess, + reinterpret_cast(src_ptr_imag), + pred_guard); + + ++iterator_A_real; + ++iterator_A_imag; + } + + ++this->smem_iterator_A_; + } + + iterator_B_real.set_iteration_index(0); + iterator_B_imag.set_iteration_index(0); + + this->smem_iterator_B_.set_iteration_index(0); + + // Load for operand B + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::TBLoadIterationsB; ++j) { + + typename IteratorB::AccessType *dst_ptr = + reinterpret_cast(this->smem_iterator_B_.get()); + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) { + + int const kSrcBytes = + sizeof_bits::value * + IteratorB::ThreadMap::kElementsPerAccess / IteratorB::kAccessesPerVector / 8; + + bool pred_guard = iterator_B_real.valid(); + + auto src_ptr_real = iterator_B_real.get(); + auto src_ptr_imag = iterator_B_imag.get(); + + cutlass::arch::cp_async_zfill( + dst_ptr + v, src_ptr_real, pred_guard); + + cutlass::arch::cp_async_zfill( + dst_ptr + v + + Base::SharedStorage::kImaginaryStrideB / + IteratorB::ThreadMap::kElementsPerAccess, + reinterpret_cast(src_ptr_imag), + pred_guard); + + ++iterator_B_real; + ++iterator_B_imag; + } + + ++this->smem_iterator_B_; + } + + // Move to the next stage + iterator_A_real.add_tile_offset({0, 1}); + iterator_A_imag.add_tile_offset({0, 1}); + + iterator_B_real.add_tile_offset({1, 0}); + iterator_B_imag.add_tile_offset({1, 0}); + + this->smem_iterator_A_.add_tile_offset({0, 1}); + this->smem_iterator_B_.add_tile_offset({1, 0}); + + // Inserts a memory fence between stages of cp.async instructions + cutlass::arch::cp_async_fence(); + } + + // Perform accumulation in the 'd' output operand + accum = src_accum; + + // Blocks until all but kStages-2 cp.async stages have committed. 
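+    // (The wait below allows at most kStages-2 committed cp.async groups to
+    // remain in flight, so the data of the oldest stage is resident in shared
+    // memory before the __syncthreads() and the first warp-fragment loads
+    // that follow.)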
+ cutlass::arch::cp_async_wait(); + __syncthreads(); + + // Pair of fragments used to overlap shared memory loads and math + // instructions + + WarpFragmentA warp_frag_real_A[2]; + WarpFragmentA warp_frag_imag_A[2]; + + WarpFragmentB warp_frag_real_B[2]; + WarpFragmentB warp_frag_imag_B[2]; + + this->warp_tile_iterator_A_.set_kgroup_index(0); + this->warp_tile_iterator_B_.set_kgroup_index(0); + + this->warp_tile_iterator_A_.load(warp_frag_real_A[0]); + this->warp_tile_iterator_A_.load_with_pointer_offset(warp_frag_imag_A[0], Base::SharedStorage::kImaginaryStrideA); + + this->warp_tile_iterator_B_.load(warp_frag_real_B[0]); + this->warp_tile_iterator_B_.load_with_pointer_offset(warp_frag_imag_B[0], Base::SharedStorage::kImaginaryStrideB); + + ++this->warp_tile_iterator_A_; + ++this->warp_tile_iterator_B_; + + iterator_A_real.clear_mask(gemm_k_iterations == 0); + iterator_A_imag.clear_mask(gemm_k_iterations == 0); + iterator_B_real.clear_mask(gemm_k_iterations == 0); + iterator_B_imag.clear_mask(gemm_k_iterations == 0); + + // Start issuing the first group of the next stage outside of the mainloop + copy_tiles_and_advance(iterator_A_real, iterator_A_imag, iterator_B_real, iterator_B_imag); + + Operator warp_mma; + + int smem_write_stage_idx = Base::kStages - 1; + int smem_read_stage_idx = 0; + + // + // Mainloop + // + + CUTLASS_GEMM_LOOP + for (; gemm_k_iterations > (-Base::kStages + 1);) { + // + // Loop over GEMM K dimension + // + + // Computes a warp-level GEMM on data held in shared memory + // Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate + CUTLASS_PRAGMA_UNROLL + for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; + ++warp_mma_k) { + + // Load warp-level tiles from shared memory, wrapping to k offset if + // this is the last group as the case may be. + + this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); + this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); + + this->warp_tile_iterator_A_.load(warp_frag_real_A[(warp_mma_k + 1) % 2]); + this->warp_tile_iterator_A_.load_with_pointer_offset(warp_frag_imag_A[(warp_mma_k + 1) % 2], Base::SharedStorage::kImaginaryStrideA); + + this->warp_tile_iterator_B_.load(warp_frag_real_B[(warp_mma_k + 1) % 2]); + this->warp_tile_iterator_B_.load_with_pointer_offset(warp_frag_imag_B[(warp_mma_k + 1) % 2], Base::SharedStorage::kImaginaryStrideB); + + ++this->warp_tile_iterator_A_; + ++this->warp_tile_iterator_B_; + + // Issue global->shared copies for the next stage + int group_start_iteration_A, group_start_iteration_B; + + if (warp_mma_k + 1 == Base::kWarpGemmIterations) { + group_start_iteration_A = 0; + group_start_iteration_B = 0; + } + else { + group_start_iteration_A = (warp_mma_k + 1) * Detail::kAccessesPerGroupA; + group_start_iteration_B = (warp_mma_k + 1) * Detail::kAccessesPerGroupB; + } + + copy_tiles_and_advance( + iterator_A_real, + iterator_A_imag, + iterator_B_real, + iterator_B_imag, + group_start_iteration_A, + group_start_iteration_B); + + if (warp_mma_k + 2 == Base::kWarpGemmIterations) { + // Inserts a memory fence between stages of cp.async instructions + cutlass::arch::cp_async_fence(); + + // Blocks until all but kStages-2 cp.async stages have committed. 
+ arch::cp_async_wait(); + __syncthreads(); + + // Move to the next stage + iterator_A_real.add_tile_offset({0, 1}); + iterator_A_imag.add_tile_offset({0, 1}); + + iterator_B_real.add_tile_offset({1, 0}); + iterator_B_imag.add_tile_offset({1, 0}); + + this->smem_iterator_A_.add_tile_offset({0, 1}); + this->smem_iterator_B_.add_tile_offset({1, 0}); + + // Add negative offsets to return iterators to the 'start' of the + // circular buffer in shared memory + if (smem_write_stage_idx == (Base::kStages - 1)) { + this->smem_iterator_A_.add_tile_offset({0, -Base::kStages}); + this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0}); + smem_write_stage_idx = 0; + } else { + ++smem_write_stage_idx; + } + + if (smem_read_stage_idx == (Base::kStages - 1)) { + + this->warp_tile_iterator_A_.add_tile_offset( + {0, -Base::kStages * Policy::kPartitionsK * + Base::kWarpGemmIterations}); + + this->warp_tile_iterator_B_.add_tile_offset( + {-Base::kStages * Policy::kPartitionsK * + Base::kWarpGemmIterations, + 0}); + smem_read_stage_idx = 0; + } else { + ++smem_read_stage_idx; + } + + --gemm_k_iterations; + iterator_A_real.clear_mask(gemm_k_iterations == 0); + iterator_A_imag.clear_mask(gemm_k_iterations == 0); + iterator_B_real.clear_mask(gemm_k_iterations == 0); + iterator_B_imag.clear_mask(gemm_k_iterations == 0); + } + + warp_mma_planar_complex( + warp_mma, + accum, + warp_frag_real_A[warp_mma_k % 2], + warp_frag_imag_A[warp_mma_k % 2], + warp_frag_real_B[warp_mma_k % 2], + warp_frag_imag_B[warp_mma_k % 2]); + } + + } + + + // Commit and drain all pending and predicated cp.async pnz from the GEMM mainloop + cutlass::arch::cp_async_fence(); + cutlass::arch::cp_async_wait<0>(); + __syncthreads(); + + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_planar_complex_pipelined.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_planar_complex_pipelined.h new file mode 100644 index 0000000000000000000000000000000000000000..d6beec459bbb75fe174d6f0e043df185ec08a0ac --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_planar_complex_pipelined.h @@ -0,0 +1,424 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Template for a double-buffered threadblock-scoped GEMM kernel. +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/array.h" +#include "cutlass/aligned_buffer.h" + +#include "cutlass/numeric_types.h" +#include "cutlass/matrix_shape.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/threadblock/mma_planar_complex_base.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace threadblock { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Structure to compute the matrix product targeting CUDA cores and SIMT math +/// instructions. +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + /// Iterates over tiles of A operand in global memory + // (concept: ReadableTileIterator | ForwardTileIterator | + // MaskedTileIterator) + typename IteratorA_, + /// Iterates over tiles of A operand in shared memory + /// (concept: WriteableTileIterator | RandomAccessTileIterator) + typename SmemIteratorA_, + /// Iterates over tiles of B operand in global memory + // (concept: ReadableTileIterator | ForwardTileIterator | + // MaskedTileIterator) + typename IteratorB_, + /// Iterates over tiles of B operand in shared memory + /// (concept: WriteableTileIterator | RandomAccessTileIterator) + typename SmemIteratorB_, + /// Data type of accumulator matrix + typename ElementC_, + /// Data type of accumulator matrix + typename LayoutC_, + /// Policy describing tuning details (concept: MmaPolicy) + typename Policy_, + /// Number of stages, + int Stages, + /// Transformation applied to A + ComplexTransform TransformA = ComplexTransform::kNone, + /// Transformation applied to B + ComplexTransform TransformB = ComplexTransform::kNone +> +class MmaPlanarComplexPipelined : + public MmaPlanarComplexBase { +public: + ///< Base class + using Base = MmaPlanarComplexBase; + + ///< Size of the Gemm problem - concept: gemm::GemmShape<> + using Shape = Shape_; + + ///< Iterates over tiles of A operand in global memory + using IteratorA = IteratorA_; + + ///< Iterates over tiles of B operand in global memory + using IteratorB = IteratorB_; + + ///< Data type of accumulator matrix + using ElementC = ElementC_; + + ///< Layout of accumulator matrix + using LayoutC = LayoutC_; + + ///< Policy describing tuning details + using Policy = Policy_; + + using ArchTag = typename Policy::Operator::ArchTag; + + using SmemIteratorA = SmemIteratorA_; + using SmemIteratorB = SmemIteratorB_; + + /// Transformation 
applied to A + static ComplexTransform const kTransformA = TransformA; + + /// Transformation applied to B + static ComplexTransform const kTransformB = TransformB; + + // + // Dependent types + // + + /// Fragment of accumulator tile + using FragmentC = ArrayPlanarComplex< + typename Policy::Operator::FragmentC::Element, + Policy::Operator::FragmentC::kElements + >; + + /// Warp-level Mma + using Operator = typename Policy::Operator; + + private: + + using FragmentA = typename IteratorA::Fragment; + using FragmentB = typename IteratorB::Fragment; + using WarpFragmentA = typename Operator::FragmentA; + using WarpFragmentB = typename Operator::FragmentB; + + private: + + // + // Data members + // + + /// Iterator to write threadblock-scoped tile of A operand to shared memory + SmemIteratorA smem_iterator_A_; + + /// Iterator to write threadblock-scoped tile of B operand to shared memory + SmemIteratorB smem_iterator_B_; + +public: + + /// Construct from tensor references + CUTLASS_DEVICE + MmaPlanarComplexPipelined( + ///< Shared storage needed for internal use by threadblock-scoped GEMM + typename Base::SharedStorage &shared_storage, + ///< ID within the threadblock + int thread_idx, + ///< ID of warp + int warp_idx, + ///< ID of each thread within a warp + int lane_idx + ): + Base(shared_storage, thread_idx, warp_idx, lane_idx), + smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx), + smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx) + { + // Compute warp location within threadblock tile by mapping the warp_id to + // three coordinates: + // _m: the warp's position within the threadblock along the M dimension + // _n: the warp's position within the threadblock along the N dimension + // _k: the warp's position within the threadblock along the K dimension + + int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN); + int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN); + + int warp_idx_m = warp_idx_mn % Base::WarpCount::kM; + int warp_idx_n = warp_idx_mn / Base::WarpCount::kM; + + // Add per-warp offsets in units of warp-level tiles + this->warp_tile_iterator_A_.add_tile_offset({warp_idx_m, Base::kWarpGemmIterations * warp_idx_k}); + this->warp_tile_iterator_B_.add_tile_offset({Base::kWarpGemmIterations * warp_idx_k, warp_idx_n}); + } + +private: + + CUTLASS_DEVICE + void warp_mma_planar_complex( + Operator & warp_mma, + FragmentC &accum, + WarpFragmentA const & real_A, + WarpFragmentA const & imag_A, + WarpFragmentB const & real_B, + WarpFragmentB const & imag_B) { + + cutlass::negate> neg_op_B; + + WarpFragmentB neg_real_B = neg_op_B(real_B); + WarpFragmentB neg_imag_B = neg_op_B(imag_B); + + warp_mma(accum.real, real_A, real_B, accum.real); + + if (kTransformB == ComplexTransform::kNone) { + warp_mma(accum.imag, real_A, imag_B, accum.imag); + } + else { + warp_mma(accum.imag, real_A, neg_imag_B, accum.imag); + } + + if (kTransformA == ComplexTransform::kNone) { + warp_mma(accum.imag, imag_A, real_B, accum.imag); + } + else { + warp_mma(accum.imag, imag_A, neg_real_B, accum.imag); + } + + if (kTransformA == ComplexTransform::kNone ^ kTransformB == ComplexTransform::kNone) { + warp_mma(accum.real, imag_A, imag_B, accum.real); + } + else { + warp_mma(accum.real, imag_A, neg_imag_B, accum.real); + } + } + +public: + + /// Perform a threadblock-scoped matrix multiply-accumulate + CUTLASS_DEVICE + void operator()( + ///< problem size of GEMM + int gemm_k_iterations, + ///< destination accumulator tile + FragmentC &accum, + ///< iterator 
over A operand in global memory + IteratorA iterator_A_real, + ///< iterator over A operand in global memory + IteratorA iterator_A_imag, + ///< iterator over B operand in global memory + IteratorB iterator_B_real, + ///< iterator over B operand in global memory + IteratorB iterator_B_imag, + ///< initial value of accumulator + FragmentC const &src_accum) { + + // + // Prologue + // + + // Perform accumulation in the 'd' output operand + accum = src_accum; + + FragmentA tb_frag_A_real; + FragmentA tb_frag_A_imag; + + FragmentB tb_frag_B_real; + FragmentB tb_frag_B_imag; + + tb_frag_A_real.clear(); + tb_frag_A_imag.clear(); + + tb_frag_B_real.clear(); + tb_frag_B_imag.clear(); + + // The last kblock is loaded in the prolog + iterator_A_real.load(tb_frag_A_real); + iterator_A_imag.load(tb_frag_A_imag); + + iterator_B_real.load(tb_frag_B_real); + iterator_B_imag.load(tb_frag_B_imag); + + ++iterator_A_real; + ++iterator_A_imag; + + ++iterator_B_real; + ++iterator_B_imag; + + this->smem_iterator_A_.store(tb_frag_A_real); + this->smem_iterator_A_.store_with_pointer_offset(tb_frag_A_imag, Base::SharedStorage::kImaginaryStrideA); + + this->smem_iterator_B_.store(tb_frag_B_real); + this->smem_iterator_B_.store_with_pointer_offset(tb_frag_B_imag, Base::SharedStorage::kImaginaryStrideB); + + ++this->smem_iterator_A_; + ++this->smem_iterator_B_; + + __syncthreads(); + + // Pair of fragments used to overlap shared memory loads and math instructions + WarpFragmentA warp_frag_real_A[2]; + WarpFragmentA warp_frag_imag_A[2]; + + WarpFragmentB warp_frag_real_B[2]; + WarpFragmentB warp_frag_imag_B[2]; + + this->warp_tile_iterator_A_.set_kgroup_index(0); + this->warp_tile_iterator_B_.set_kgroup_index(0); + + this->warp_tile_iterator_A_.load(warp_frag_real_A[0]); + this->warp_tile_iterator_A_.load_with_pointer_offset(warp_frag_imag_A[0], Base::SharedStorage::kImaginaryStrideA); + + this->warp_tile_iterator_B_.load(warp_frag_real_B[0]); + this->warp_tile_iterator_B_.load_with_pointer_offset(warp_frag_imag_B[0], Base::SharedStorage::kImaginaryStrideB); + + + ++this->warp_tile_iterator_A_; + ++this->warp_tile_iterator_B_; + + Operator warp_mma; + + int smem_write_stage_idx = 1; + + // Avoid reading out of bounds + iterator_A_real.clear_mask(gemm_k_iterations <= 1); + iterator_A_imag.clear_mask(gemm_k_iterations <= 1); + + iterator_B_real.clear_mask(gemm_k_iterations <= 1); + iterator_B_imag.clear_mask(gemm_k_iterations <= 1); + + // Issue loads during the first warp-level matrix multiply-add *AFTER* issuing + // shared memory loads (which have the tightest latency requirement). + + // + // Mainloop + // + + // Note: The main loop does not support Base::kWarpGemmIterations == 2. + CUTLASS_GEMM_LOOP + for (; gemm_k_iterations > 0; --gemm_k_iterations) { + // + // Loop over GEMM K dimension + // + + CUTLASS_PRAGMA_UNROLL + for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) { + + // Load warp-level tiles from shared memory, wrapping to k offset if this is the last group + // as the case may be. 
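+        // (The branch below implements the two-stage circular buffer: on the
+        // last k-group the staged global fragments are written to shared
+        // memory, the threadblock synchronizes, and smem_write_stage_idx ^= 1
+        // toggles which stage is written next while the other is read.)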
+ + if (warp_mma_k == Base::kWarpGemmIterations - 1) { + + // Write fragments to shared memory + this->smem_iterator_A_.store(tb_frag_A_real); + this->smem_iterator_A_.store_with_pointer_offset(tb_frag_A_imag, Base::SharedStorage::kImaginaryStrideA); + + this->smem_iterator_B_.store(tb_frag_B_real); + this->smem_iterator_B_.store_with_pointer_offset(tb_frag_B_imag, Base::SharedStorage::kImaginaryStrideB); + + __syncthreads(); + + ++this->smem_iterator_B_; + ++this->smem_iterator_A_; + + // Add negative offsets to return iterators to the 'start' of the circular buffer in shared memory + if (smem_write_stage_idx == 1) { + this->smem_iterator_A_.add_tile_offset({0, -Base::kStages}); + this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0}); + } + else { + this->warp_tile_iterator_A_.add_tile_offset( + {0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations}); + this->warp_tile_iterator_B_.add_tile_offset( + {-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations, + 0}); + } + + smem_write_stage_idx ^= 1; + } + + this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); + this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); + + this->warp_tile_iterator_A_.load(warp_frag_real_A[(warp_mma_k + 1) % 2]); + this->warp_tile_iterator_A_.load_with_pointer_offset(warp_frag_imag_A[(warp_mma_k + 1) % 2], Base::SharedStorage::kImaginaryStrideA); + + this->warp_tile_iterator_B_.load(warp_frag_real_B[(warp_mma_k + 1) % 2]); + this->warp_tile_iterator_B_.load_with_pointer_offset(warp_frag_imag_B[(warp_mma_k + 1) % 2], Base::SharedStorage::kImaginaryStrideB); + + ++this->warp_tile_iterator_A_; + ++this->warp_tile_iterator_B_; + + if (warp_mma_k == 0) { + + iterator_A_real.load(tb_frag_A_real); + iterator_A_imag.load(tb_frag_A_imag); + + iterator_B_real.load(tb_frag_B_real); + iterator_B_imag.load(tb_frag_B_imag); + + ++iterator_A_real; + ++iterator_A_imag; + ++iterator_B_real; + ++iterator_B_imag; + + // Avoid reading out of bounds if this was the last loop iteration + iterator_A_real.clear_mask(gemm_k_iterations <= 2); + iterator_A_imag.clear_mask(gemm_k_iterations <= 2); + iterator_B_real.clear_mask(gemm_k_iterations <= 2); + iterator_B_imag.clear_mask(gemm_k_iterations <= 2); + } + + warp_mma_planar_complex( + warp_mma, + accum, + warp_frag_real_A[warp_mma_k % 2], + warp_frag_imag_A[warp_mma_k % 2], + warp_frag_real_B[warp_mma_k % 2], + warp_frag_imag_B[warp_mma_k % 2]); + } + } + + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_singlestage.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_singlestage.h new file mode 100644 index 0000000000000000000000000000000000000000..3ce8ac805410292efc00cf2985bbe046cf51ec4c --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_singlestage.h @@ -0,0 +1,265 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Template for a double-buffered threadblock-scoped GEMM kernel. +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/array.h" +#include "cutlass/aligned_buffer.h" + +#include "cutlass/numeric_types.h" +#include "cutlass/matrix_shape.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/threadblock/mma_base.h" + + + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace threadblock { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions. 
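+/// Single-stage note (illustrative): unlike the pipelined and multistage
+/// variants, this mainloop keeps only one threadblock tile of A and B in
+/// shared memory; each iteration stores the staged fragments, synchronizes,
+/// runs the warp-level MMAs, and synchronizes again before staging the next
+/// tile.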
+template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + /// Iterates over tiles of A operand in global memory + // (concept: ReadableTileIterator | ForwardTileIterator | MaskedTileIterator) + typename IteratorA_, + /// Iterates over tiles of A operand in shared memory + /// (concept: WriteableTileIterator | RandomAccessTileIterator) + typename SmemIteratorA_, + /// Iterates over tiles of B operand in global memory + // (concept: ReadableTileIterator | ForwardTileIterator | MaskedTileIterator) + typename IteratorB_, + /// Iterates over tiles of B operand in shared memory + /// (concept: WriteableTileIterator | RandomAccessTileIterator) + typename SmemIteratorB_, + /// Data type of accumulator matrix + typename ElementC_, + /// Data type of accumulator matrix + typename LayoutC_, + /// Policy describing tuning details (concept: MmaPolicy) + typename Policy_, + /// Used for partial specialization + typename Enable = bool +> +class MmaSingleStage : public MmaBase { +public: + + ///< Base class + using Base = MmaBase; + + using Shape = Shape_; ///< Size of the Gemm problem - concept: gemm::GemmShape<> + using IteratorA = IteratorA_; ///< Iterates over tiles of A operand in global memory + using IteratorB = IteratorB_; ///< Iterates over tiles of B operand in global memory + using ElementC = ElementC_; ///< Data type of accumulator matrix + using LayoutC = LayoutC_; ///< Layout of accumulator matrix + using Policy = Policy_; ///< Policy describing tuning details + + using SmemIteratorA = SmemIteratorA_; + using SmemIteratorB = SmemIteratorB_; + + // + // Dependent types + // + + /// Fragment of operand A loaded from global memory + using FragmentA = typename IteratorA::Fragment; + + /// Fragment of operand B loaded from global memory + using FragmentB = typename IteratorB::Fragment; + + /// Fragment of accumulator tile + using FragmentC = typename Policy::Operator::FragmentC; + + /// Warp-level Mma + using Operator = typename Policy::Operator; + + using ArchTag = arch::Sm70; + + /// Complex transform on A operand + static ComplexTransform const kTransformA = Operator::kTransformA; + + /// Complex transform on B operand + static ComplexTransform const kTransformB = Operator::kTransformB; + + // staticaly assert kStages for MmaSingleStage is 1 (single stage mma pipeline) + static_assert((Base::kStages==1), "MmaSingleStage requires kStages set to value 1"); +private: + + using WarpFragmentA = typename Operator::FragmentA; + using WarpFragmentB = typename Operator::FragmentB; + +protected: + + /// Iterator to write threadblock-scoped tile of A operand to shared memory + SmemIteratorA smem_iterator_A_; + + /// Iterator to write threadblock-scoped tile of B operand to shared memory + SmemIteratorB smem_iterator_B_; + +public: + + /// Construct from tensor references + CUTLASS_DEVICE + MmaSingleStage( + typename Base::SharedStorage &shared_storage, ///< Shared storage needed for internal use by threadblock-scoped GEMM + int thread_idx, ///< ID within the threadblock + int warp_idx, ///< ID of warp + int lane_idx ///< ID of each thread within a warp + ): + Base(shared_storage, thread_idx, warp_idx, lane_idx), + smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx), + smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx) { + + // Compute warp location within threadblock tile by mapping the warp_id to + // three coordinates: + // _m: the warp's position within the threadblock along the M dimension + // _n: the warp's position within the threadblock along 
the N dimension + // _k: the warp's position within the threadblock along the K dimension + + int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN); + int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN); + + int warp_idx_m = warp_idx_mn % Base::WarpCount::kM; + int warp_idx_n = warp_idx_mn / Base::WarpCount::kM; + + // Add per-warp offsets in units of warp-level tiles + this->warp_tile_iterator_A_.add_tile_offset({warp_idx_m, Base::kWarpGemmIterations * warp_idx_k}); + this->warp_tile_iterator_B_.add_tile_offset({Base::kWarpGemmIterations * warp_idx_k, warp_idx_n}); + + } + + /// Perform a threadblock-scoped matrix multiply-accumulate + CUTLASS_DEVICE + void operator()( + int gemm_k_iterations, ///< number of iterations of the mainloop + FragmentC &accum, ///< destination accumulator tile + IteratorA iterator_A, ///< iterator over A operand in global memory + IteratorB iterator_B, ///< iterator over B operand in global memory + FragmentC const &src_accum) { ///< source accumualtor tile + + // + // Prologue + // + + // Perform accumulation in the 'd' output operand + accum = src_accum; + + FragmentA tb_frag_A; + FragmentB tb_frag_B; + + tb_frag_A.clear(); + tb_frag_B.clear(); + + // The last kblock is loaded in the prolog + iterator_A.load(tb_frag_A); + iterator_B.load(tb_frag_B); + + ++iterator_A; + ++iterator_B; + + // Pair of fragments used to overlap shared memory loads and math instructions + WarpFragmentA warp_frag_A; + WarpFragmentB warp_frag_B; + + Operator warp_mma; + + // Avoid reading out of bounds + iterator_A.clear_mask(gemm_k_iterations <= 1); + iterator_B.clear_mask(gemm_k_iterations <= 1); + + // + // Mainloop + // + + CUTLASS_GEMM_LOOP + for (; gemm_k_iterations > 0; --gemm_k_iterations) { + this->smem_iterator_A_.store(tb_frag_A); + this->smem_iterator_B_.store(tb_frag_B); + + __syncthreads(); + + // + // Loop over GEMM K dimension + // + + CUTLASS_PRAGMA_UNROLL + for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) { + + // Load warp-level tiles from shared memory, wrapping to k offset if this is the last group + // as the case may be. 
+ + this->warp_tile_iterator_A_.set_kgroup_index(warp_mma_k % Base::kWarpGemmIterations); + this->warp_tile_iterator_B_.set_kgroup_index(warp_mma_k % Base::kWarpGemmIterations); + + this->warp_tile_iterator_A_.load(warp_frag_A); + this->warp_tile_iterator_B_.load(warp_frag_B); + + ++this->warp_tile_iterator_A_; + ++this->warp_tile_iterator_B_; + + warp_mma(accum, warp_frag_A, warp_frag_B, accum); + } + + // Add negative offsets to return smem load iterators to the 'start' of the shared memory + this->warp_tile_iterator_A_.add_tile_offset({0, -Policy::kPartitionsK * Base::kWarpGemmIterations}); + this->warp_tile_iterator_B_.add_tile_offset({-Policy::kPartitionsK * Base::kWarpGemmIterations, 0}); + + __syncthreads(); + + iterator_A.load(tb_frag_A); + iterator_B.load(tb_frag_B); + + ++iterator_A; + ++iterator_B; + + // Avoid reading out of bounds if this was the last loop iteration + iterator_A.clear_mask(gemm_k_iterations <= 2); + iterator_B.clear_mask(gemm_k_iterations <= 2); + } + + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_softmax_mainloop_fusion_multistage.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_softmax_mainloop_fusion_multistage.h new file mode 100644 index 0000000000000000000000000000000000000000..0f54c8bfa4e2e60ef0bb2677e3283ae908efcb1a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_softmax_mainloop_fusion_multistage.h @@ -0,0 +1,756 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/*! \file + \brief Template for a double-buffered threadblock-scoped GEMM kernel. + + It loads two loop invariant vectors, norm and sum, in the prologue and + stores them in the register file. We will call elementwise operation to + apply norm and sum between ldmatrix and warp mma. +*/ + +#pragma once + +#include "cutlass/aligned_buffer.h" +#include "cutlass/arch/memory.h" +#include "cutlass/array.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_shape.h" +#include "cutlass/numeric_types.h" +#include "cutlass/transform/threadblock/predicated_scale_bias_vector_iterator.h" +#include "cutlass/gemm/threadblock/mma_base.h" +#include "cutlass/gemm/warp/softmax_scale_bias_transform.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace threadblock { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Structure to compute the matrix product targeting CUDA cores and SIMT math +/// instructions. +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + /// Policy describing tuning details (concept: MmaPolicy) + typename Policy_, + /// Number of stages, + int Stages, + /// Used for partial specialization + typename Enable = bool> +class MmaMainloopFusionBase { + public: + ///< Size of the Gemm problem - concept: gemm::GemmShape<> + using Shape = Shape_; + + ///< Policy describing tuning details + using Policy = Policy_; + + // + // Dependent types + // + + /// Warp-level Mma + using Operator = typename Policy::Operator; + + /// Shape describing the overall GEMM computed from shared memory + /// by each warp. 
+  using WarpGemm = typename Policy::Operator::Shape;
+
+  /// Shape describing the number of warps filling the CTA
+  using WarpCount = cutlass::gemm::GemmShape<Shape::kM / WarpGemm::kM,
+                                             Shape::kN / WarpGemm::kN,
+                                             Shape::kK / WarpGemm::kK>;
+
+  /// Number of warp-level GEMM operations
+  static int const kWarpGemmIterations =
+      (WarpGemm::kK / Operator::Policy::MmaShape::kK);
+
+  /// Number of stages
+  static int const kStages = Stages;
+
+  /// Tensor reference to the A operand
+  using TensorRefA = TensorRef<typename Operator::ElementA, typename Operator::LayoutA>;
+
+  /// Tensor reference to the B operand
+  using TensorRefB = TensorRef<typename Operator::ElementB, typename Operator::LayoutB>;
+
+  //
+  // Nested structs
+  //
+
+  /// Shared storage object needed by threadblock-scoped GEMM
+  class SharedStorage {
+   public:
+    //
+    // Type definitions
+    //
+
+    /// Shape of the A matrix operand in shared memory
+    using ShapeA = MatrixShape<Shape::kM + Policy::SmemPaddingA::kRow,
+                               Shape::kK * kStages +
+                                   Policy::SmemPaddingA::kColumn>;
+
+    /// Shape of the B matrix operand in shared memory
+    using ShapeB =
+        MatrixShape<Shape::kK * kStages + Policy::SmemPaddingB::kRow,
+                    Shape::kN + Policy::SmemPaddingB::kColumn>;
+
+   public:
+    //
+    // Data members
+    //
+
+    /// Buffer for A operand
+    AlignedBuffer<typename Operator::ElementA, ShapeA::kCount> operand_A;
+
+    /// Buffer for B operand
+    AlignedBuffer<typename Operator::ElementB, ShapeB::kCount> operand_B;
+
+   public:
+
+    //
+    // Methods
+    //
+
+    /// Returns a layout object for the A matrix
+    CUTLASS_DEVICE
+    static typename Operator::LayoutA LayoutA() {
+      return Operator::LayoutA::packed({ShapeA::kRow, ShapeA::kColumn});
+    }
+
+    /// Returns a layout object for the B matrix
+    CUTLASS_HOST_DEVICE
+    static typename Operator::LayoutB LayoutB() {
+      return Operator::LayoutB::packed({ShapeB::kRow, ShapeB::kColumn});
+    }
+
+    /// Returns a TensorRef to the A operand
+    CUTLASS_HOST_DEVICE
+    TensorRefA operand_A_ref() {
+      return TensorRefA{operand_A.data(), LayoutA()};
+    }
+
+    /// Returns a TensorRef to the B operand
+    CUTLASS_HOST_DEVICE
+    TensorRefB operand_B_ref() {
+      return TensorRefB{operand_B.data(), LayoutB()};
+    }
+  };
+
+ protected:
+
+  //
+  // Data members
+  //
+
+  /// Iterator to load a warp-scoped tile of A operand from shared memory
+  typename Operator::IteratorA warp_tile_iterator_A_;
+
+  /// Iterator to load a warp-scoped tile of B operand from shared memory
+  typename Operator::IteratorB warp_tile_iterator_B_;
+
+public:
+
+  /// Construct from tensor references
+  CUTLASS_DEVICE
+  MmaMainloopFusionBase(
+      ///< Shared storage needed for internal use by threadblock-scoped GEMM
+      SharedStorage &shared_storage,
+      ///< ID within the threadblock
+      int thread_idx,
+      ///< ID of warp
+      int warp_idx,
+      ///< ID of each thread within a warp
+      int lane_idx)
+      : warp_tile_iterator_A_(shared_storage.operand_A_ref(), lane_idx),
+        warp_tile_iterator_B_(shared_storage.operand_B_ref(), lane_idx) {}
+};
+
+
+/// Structure to compute the matrix product targeting CUDA cores and SIMT math
+/// instructions.
+template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + /// Iterates over tiles of A operand in global memory + // (concept: ReadableTileIterator | ForwardTileIterator | + // MaskedTileIterator) + typename IteratorA_, + /// Iterates over tiles of A operand in shared memory + /// (concept: WriteableTileIterator | RandomAccessTileIterator) + typename SmemIteratorA_, + /// Cache operation for operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Iterates over tiles of B operand in global memory + // (concept: ReadableTileIterator | ForwardTileIterator | + // MaskedTileIterator) + typename IteratorB_, + /// Iterates over tiles of B operand in shared memory + /// (concept: WriteableTileIterator | RandomAccessTileIterator) + typename SmemIteratorB_, + /// Cache operation for operand B + cutlass::arch::CacheOperation::Kind CacheOpB, + /// Iterates over vectors of var and mean vector in global memory + // (concept: ReadableTileIterator | ForwardTileIterator | + // MaskedTileIterator) + typename IteratorNormSum_, + /// Data type of accumulator matrix + typename ElementC_, + /// Data type of accumulator matrix + typename LayoutC_, + /// Policy describing tuning details (concept: MmaPolicy) + typename Policy_, + /// Number of stages, + int Stages, + /// Whether problem has been transformed. This determines to which operand + /// the softmax is applied. + bool InternalTranspose, + /// Use zfill or predicate for out-of-bound cp.async + SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone, + /// Used for partial specialization + typename Enable = bool> +class MmaSoftmaxMainloopFusionMultistage : + public MmaMainloopFusionBase { +public: + ///< Size of the Gemm problem - concept: gemm::GemmShape<> + using Shape = Shape_; + ///< Iterates over tiles of A operand in global memory + using IteratorA = IteratorA_; + ///< Iterates over tiles of B operand in global memory + using IteratorB = IteratorB_; + ///< Iterates over tiles of the var and mean vectors in global memory + using IteratorNormSum = IteratorNormSum_; + ///< Policy describing tuning details + using Policy = Policy_; + + ///< Base class + using Base = MmaMainloopFusionBase; + + ///< Data type of accumulator matrix + using ElementC = ElementC_; + ///< Layout of accumulator matrix + using LayoutC = LayoutC_; + + using SmemIteratorA = SmemIteratorA_; + using SmemIteratorB = SmemIteratorB_; + + static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; + + // + // Dependent types + // + + /// Fragment of accumulator tile + using FragmentC = typename Policy::Operator::FragmentC; + + /// Warp-level Mma + using Operator = typename Policy::Operator; + + /// Minimum architecture is Sm80 to support cp.async + using ArchTag = arch::Sm80; + + /// Complex transform on A operand + static ComplexTransform const kTransformA = Operator::kTransformA; + + /// Complex transform on B operand + static ComplexTransform const kTransformB = Operator::kTransformB; + + /// Internal structure exposed for introspection. 
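// ---------------------------------------------------------------------------
// Illustrative sketch (editorial, not from this header): IteratorNormSum feeds
// the warps with two loop-invariant per-row vectors -- the running row maximum
// ("norm") and the row sum of exponentials -- produced by an earlier
// partial-softmax pass, and the fused mainloop rescales one operand with them
// right between the shared-memory load and the warp MMA; InternalTranspose
// only selects whether that operand is A or B.  The scalar arithmetic is
// roughly the stable softmax weight below (function name and types are
// assumptions of the sketch).
#include <cmath>
#include <vector>

void apply_softmax_norm_sum(std::vector<float> &row_tile, float row_max, float row_sum) {
  // y = exp(x - row_max) / row_sum  -- subtracting the max keeps exp() in range.
  for (float &x : row_tile) {
    x = std::exp(x - row_max) / row_sum;
  }
}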
+ struct Detail { + + static_assert(Base::kWarpGemmIterations > 1, + "The pipelined structure requires at least two warp-level " + "GEMM operations."); + + /// Number of cp.async instructions to load one stage of operand A + static int const AsyncCopyIterationsPerStageA = + IteratorA::ThreadMap::Iterations::kCount; + + /// Number of cp.async instructions to load one stage of operand B + static int const AsyncCopyIterationsPerStageB = + IteratorB::ThreadMap::Iterations::kCount; + + /// Number of stages + static int const kStages = Stages; + + /// Number of cp.async instructions to load on group of operand A + static int const kAccessesPerGroupA = + (AsyncCopyIterationsPerStageA + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations; + + /// Number of cp.async instructions to load on group of operand B + static int const kAccessesPerGroupB = + (AsyncCopyIterationsPerStageB + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations; + }; + + private: + + using WarpLoadedFragmentA = typename Operator::FragmentA; + using WarpLoadedFragmentB = typename Operator::FragmentB; + using WarpTransformedFragmentA = typename Operator::TransformedFragmentA; + using WarpTransformedFragmentB = typename Operator::TransformedFragmentB; + + using WarpLoadedFragmentNormSum = typename IteratorNormSum::Fragment; + + static bool const kInternalTranspose = InternalTranspose; + + using SoftmaxFragment = typename platform::conditional::type; + + + private: + + // + // Data members + // + + /// Iterator to write threadblock-scoped tile of A operand to shared memory + SmemIteratorA smem_iterator_A_; + + /// Iterator to write threadblock-scoped tile of B operand to shared memory + SmemIteratorB smem_iterator_B_; + + int warp_idx_m_; + + int warp_idx_n_; + +public: + + /// Construct from tensor references + CUTLASS_DEVICE + MmaSoftmaxMainloopFusionMultistage( + ///< Shared storage needed for internal use by threadblock-scoped GEMM + typename Base::SharedStorage &shared_storage, + ///< ID within the threadblock + int thread_idx, + ///< ID of warp + int warp_idx, + ///< ID of each thread within a warp + int lane_idx + ): + Base(shared_storage, thread_idx, warp_idx, lane_idx), + smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx), + smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx) + { + // Compute warp location within threadblock tile by mapping the warp_id to + // three coordinates: + // _m: the warp's position within the threadblock along the M dimension + // _n: the warp's position within the threadblock along the N dimension + // _k: the warp's position within the threadblock along the K dimension + + int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN); + int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN); + + warp_idx_m_ = warp_idx_mn % Base::WarpCount::kM; + warp_idx_n_ = warp_idx_mn / Base::WarpCount::kM; + + // Add per-warp offsets in units of warp-level tiles + this->warp_tile_iterator_A_.add_tile_offset( + {warp_idx_m_, Base::kWarpGemmIterations * warp_idx_k}); + this->warp_tile_iterator_B_.add_tile_offset( + {Base::kWarpGemmIterations * warp_idx_k, warp_idx_n_}); + } + + CUTLASS_DEVICE + void copy_tiles_and_advance(IteratorA &iterator_A, + IteratorB &iterator_B, + int group_start_A = 0, int group_start_B = 0) { + iterator_A.set_iteration_index(group_start_A * + IteratorA::kAccessesPerVector); + this->smem_iterator_A_.set_iteration_index(group_start_A); + + // Async Copy for operand A + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < 
Detail::kAccessesPerGroupA; ++j) { + if (group_start_A + j < Detail::AsyncCopyIterationsPerStageA) { + typename IteratorA::AccessType *dst_ptr = + reinterpret_cast( + this->smem_iterator_A_.get()); + + int const kSrcBytes = sizeof_bits::value * + IteratorA::ThreadMap::kElementsPerAccess / + IteratorA::kAccessesPerVector / 8; + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) { + auto gmem_ptr = iterator_A.get(); + + if (SharedMemoryClear == SharedMemoryClearOption::kZfill) { + cutlass::arch::cp_async_zfill( + dst_ptr + v, gmem_ptr, iterator_A.valid()); + } else { + cutlass::arch::cp_async( + dst_ptr + v, gmem_ptr, iterator_A.valid()); + } + + ++iterator_A; + } + + ++this->smem_iterator_A_; + } + } + + iterator_B.set_iteration_index(group_start_B * + IteratorB::kAccessesPerVector); + this->smem_iterator_B_.set_iteration_index(group_start_B); + + // Async Copy for operand B + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) { + if (group_start_B + j < Detail::AsyncCopyIterationsPerStageB) { + typename IteratorB::AccessType *dst_ptr = + reinterpret_cast( + this->smem_iterator_B_.get()); + + int const kSrcBytes = sizeof_bits::value * + IteratorB::ThreadMap::kElementsPerAccess / + IteratorB::kAccessesPerVector / 8; + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) { + auto gmem_ptr = iterator_B.get(); + + if (SharedMemoryClear == SharedMemoryClearOption::kZfill) { + cutlass::arch::cp_async_zfill( + dst_ptr + v, gmem_ptr, iterator_B.valid()); + } else { + cutlass::arch::cp_async( + dst_ptr + v, gmem_ptr, iterator_B.valid()); + } + + ++iterator_B; + } + ++this->smem_iterator_B_; + } + } + } + + /// Perform a threadblock-scoped matrix multiply-accumulate + CUTLASS_DEVICE + void operator()( + ///< problem size of GEMM + int gemm_k_iterations, + ///< destination accumulator tile + FragmentC &accum, + ///< iterator over A operand in global memory + IteratorA iterator_A, + ///< iterator over B operand in global memory + IteratorB iterator_B, + ///< iterator over B operand in global memory + IteratorNormSum iterator_norm_sum, + ///< initial value of accumulator + FragmentC const &src_accum) { + + // + // Prologue + // + // Issue several complete stages + + WarpLoadedFragmentNormSum warp_loaded_frag_norm_sum; + iterator_norm_sum.add_tile_offset({0, warp_idx_m_}); + iterator_norm_sum.load(warp_loaded_frag_norm_sum); + + CUTLASS_PRAGMA_UNROLL + for (int stage = 0; stage < Base::kStages - 1; + ++stage, --gemm_k_iterations) { + + iterator_A.clear_mask(gemm_k_iterations == 0); + iterator_B.clear_mask(gemm_k_iterations == 0); + + iterator_A.set_iteration_index(0); + this->smem_iterator_A_.set_iteration_index(0); + + // Async Copy for operand A + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) { + typename IteratorA::AccessType *dst_ptr = + reinterpret_cast( + this->smem_iterator_A_.get()); + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) { + int const kSrcBytes = + sizeof_bits::value * + IteratorA::ThreadMap::kElementsPerAccess / + IteratorA::kAccessesPerVector / 8; + + int src_bytes = (iterator_A.valid() ? 
kSrcBytes : 0); + + cutlass::arch::cp_async_zfill( + dst_ptr + v, iterator_A.get(), iterator_A.valid()); + + ++iterator_A; + } + + ++this->smem_iterator_A_; + } + + iterator_B.set_iteration_index(0); + this->smem_iterator_B_.set_iteration_index(0); + + // Async Copy for operand B + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) { + typename IteratorB::AccessType *dst_ptr = + reinterpret_cast( + this->smem_iterator_B_.get()); + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) { + int const kSrcBytes = + sizeof_bits::value * + IteratorB::ThreadMap::kElementsPerAccess / + IteratorB::kAccessesPerVector / 8; + + cutlass::arch::cp_async_zfill( + dst_ptr + v, iterator_B.get(), iterator_B.valid()); + + ++iterator_B; + } + + ++this->smem_iterator_B_; + } + + // Move to the next stage + iterator_A.add_tile_offset({0, 1}); + iterator_B.add_tile_offset({1, 0}); + + this->smem_iterator_A_.add_tile_offset({0, 1}); + this->smem_iterator_B_.add_tile_offset({1, 0}); + + // Defines the boundary of a stage of cp.async. + cutlass::arch::cp_async_fence(); + } + + // Perform accumulation in the 'd' output operand + accum = src_accum; + + // Waits until kStages-2 stages have committed. + cutlass::arch::cp_async_wait(); + __syncthreads(); + + // Pair of fragments used to overlap shared memory loads and math + // instructions + WarpLoadedFragmentA warp_loaded_frag_A[2]; + WarpLoadedFragmentB warp_loaded_frag_B[2]; + WarpTransformedFragmentA warp_transformed_frag_A[2]; + WarpTransformedFragmentB warp_transformed_frag_B[2]; + + Operator warp_mma; + cutlass::gemm::warp::SoftmaxScaleBiasTransform< + SoftmaxFragment, WarpLoadedFragmentNormSum> elementwise_transform; + + this->warp_tile_iterator_A_.set_kgroup_index(0); + this->warp_tile_iterator_B_.set_kgroup_index(0); + + this->warp_tile_iterator_A_.load(warp_loaded_frag_A[0]); + this->warp_tile_iterator_B_.load(warp_loaded_frag_B[0]); + + ++this->warp_tile_iterator_A_; + ++this->warp_tile_iterator_B_; + + iterator_A.clear_mask(gemm_k_iterations == 0); + iterator_B.clear_mask(gemm_k_iterations == 0); + + // Start issuing the first group of the next stage outside of the mainloop + copy_tiles_and_advance(iterator_A, iterator_B); + + int smem_write_stage_idx = Base::kStages - 1; + int smem_read_stage_idx = 0; + + warp_mma.transform(warp_transformed_frag_A[0], warp_transformed_frag_B[0], + warp_loaded_frag_A[0], warp_loaded_frag_B[0]); + + if (kInternalTranspose) { + elementwise_transform(warp_transformed_frag_B[0], + warp_loaded_frag_norm_sum); + } else { + elementwise_transform(warp_transformed_frag_A[0], + warp_loaded_frag_norm_sum); + } + + // + // Mainloop + // + + CUTLASS_GEMM_LOOP + for (; gemm_k_iterations > (-Base::kStages + 1);) { + // + // Loop over GEMM K dimension + // + + // Computes a warp-level GEMM on data held in shared memory + // Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate + CUTLASS_PRAGMA_UNROLL + for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; + ++warp_mma_k) { + + // Load warp-level tiles from shared memory, wrapping to k offset if + // this is the last group as the case may be. 
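// ---------------------------------------------------------------------------
// Illustrative sketch (editorial, not from this header): the prologue above
// fills kStages - 1 shared-memory stages with cp.async, fences each stage with
// cp_async_fence(), and then waits until all but the newest stage have landed
// (the "kStages-2 stages have committed" wait) before any math starts.  The
// libcu++ pipeline API exposes the same commit/wait machinery without
// hand-written PTX; kStages, kTile and all names below are assumptions of the
// sketch, and blockDim.x is assumed to equal kTile.
#include <cooperative_groups.h>
#include <cuda/pipeline>

namespace sketch {

constexpr int kStages = 3;
constexpr int kTile   = 256;

__global__ void staged_partial_sums(float const *in, float *out, int num_tiles) {
  namespace cg = cooperative_groups;
  __shared__ float smem[kStages][kTile];
  __shared__ cuda::pipeline_shared_state<cuda::thread_scope_block, kStages> state;

  auto block = cg::this_thread_block();
  auto pipe  = cuda::make_pipeline(block, &state);

  float acc = 0.0f;
  int fetch = 0;

  // Prologue: commit kStages - 1 stages before doing any math.
  int prologue = (kStages - 1 < num_tiles) ? kStages - 1 : num_tiles;
  for (; fetch < prologue; ++fetch) {
    pipe.producer_acquire();
    cuda::memcpy_async(block, smem[fetch % kStages], in + fetch * kTile,
                       sizeof(float) * kTile, pipe);
    pipe.producer_commit();
  }

  // Mainloop: keep the pipeline full, then consume the oldest stage.
  for (int compute = 0; compute < num_tiles; ++compute) {
    if (fetch < num_tiles) {
      pipe.producer_acquire();
      cuda::memcpy_async(block, smem[fetch % kStages], in + fetch * kTile,
                         sizeof(float) * kTile, pipe);
      pipe.producer_commit();
      ++fetch;
    }

    pipe.consumer_wait();   // analogue of waiting for all but the newest stage
    block.sync();           // conservative barrier before touching the stage
    acc += smem[compute % kStages][threadIdx.x];
    block.sync();
    pipe.consumer_release();
  }

  out[blockIdx.x * blockDim.x + threadIdx.x] = acc;
}

}  // namespace sketch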
+ + this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); + this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); + + this->warp_tile_iterator_A_.load(warp_loaded_frag_A[(warp_mma_k + 1) % 2]); + this->warp_tile_iterator_B_.load(warp_loaded_frag_B[(warp_mma_k + 1) % 2]); + + ++this->warp_tile_iterator_A_; + ++this->warp_tile_iterator_B_; + + if (warp_mma_k > 0) { + warp_mma.transform(warp_transformed_frag_A[warp_mma_k % 2], + warp_transformed_frag_B[warp_mma_k % 2], + warp_loaded_frag_A[warp_mma_k % 2], + warp_loaded_frag_B[warp_mma_k % 2]); + + if (kInternalTranspose) { + elementwise_transform(warp_transformed_frag_B[warp_mma_k % 2], + warp_loaded_frag_norm_sum); + } else { + elementwise_transform(warp_transformed_frag_A[warp_mma_k % 2], + warp_loaded_frag_norm_sum); + } + } + + // Issue global->shared copies for the next stage + int group_start_iteration_A, group_start_iteration_B; + + if (warp_mma_k + 1 == Base::kWarpGemmIterations) { + group_start_iteration_A = 0; + group_start_iteration_B = 0; + } else { + group_start_iteration_A = + (warp_mma_k + 1) * Detail::kAccessesPerGroupA; + group_start_iteration_B = + (warp_mma_k + 1) * Detail::kAccessesPerGroupB; + } + + copy_tiles_and_advance(iterator_A, iterator_B, + group_start_iteration_A, + group_start_iteration_B); + + warp_mma( + accum, + warp_transformed_frag_A[warp_mma_k % 2], + warp_transformed_frag_B[warp_mma_k % 2], + accum + ); + + if (warp_mma_k + 2 == Base::kWarpGemmIterations) { + + // Inserts a memory fence between stages of cp.async instructions. + cutlass::arch::cp_async_fence(); + + // Waits until kStages-2 stages have committed. + arch::cp_async_wait(); + __syncthreads(); + + // Move to the next stage + iterator_A.add_tile_offset({0, 1}); + iterator_B.add_tile_offset({1, 0}); + + this->smem_iterator_A_.add_tile_offset({0, 1}); + this->smem_iterator_B_.add_tile_offset({1, 0}); + + // Add negative offsets to return iterators to the 'start' of the + // circular buffer in shared memory + if (smem_write_stage_idx == (Base::kStages - 1)) { + this->smem_iterator_A_.add_tile_offset({0, -Base::kStages}); + this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0}); + smem_write_stage_idx = 0; + } else { + ++smem_write_stage_idx; + } + + if (smem_read_stage_idx == (Base::kStages - 1)) { + this->warp_tile_iterator_A_.add_tile_offset( + {0, -Base::kStages * Policy::kPartitionsK * + Base::kWarpGemmIterations}); + this->warp_tile_iterator_B_.add_tile_offset( + {-Base::kStages * Policy::kPartitionsK * + Base::kWarpGemmIterations, + 0}); + smem_read_stage_idx = 0; + } else { + ++smem_read_stage_idx; + } + + --gemm_k_iterations; + iterator_A.clear_mask(gemm_k_iterations == 0); + iterator_B.clear_mask(gemm_k_iterations == 0); + } + + // Do any conversions feeding the first stage at the end of the loop so + // we can start right away on mma instructions + if (warp_mma_k + 1 == Base::kWarpGemmIterations) { + warp_mma.transform(warp_transformed_frag_A[(warp_mma_k + 1) % 2], + warp_transformed_frag_B[(warp_mma_k + 1) % 2], + warp_loaded_frag_A[(warp_mma_k + 1) % 2], + warp_loaded_frag_B[(warp_mma_k + 1) % 2]); + + if (kInternalTranspose) { + elementwise_transform(warp_transformed_frag_B[(warp_mma_k + 1) % 2], + warp_loaded_frag_norm_sum); + } else { + elementwise_transform(warp_transformed_frag_A[(warp_mma_k + 1) % 2], + warp_loaded_frag_norm_sum); + } + } + } + + } + + if (SharedMemoryClear == SharedMemoryClearOption::kZfill) { + // commit and drain all pending and 
predicated cp.async pnz from the GEMM mainloop + cutlass::arch::cp_async_fence(); + cutlass::arch::cp_async_wait<0>(); + __syncthreads(); + } + + // Commit and drain all pending and predicated cp.async pnz from the GEMM mainloop + cutlass::arch::cp_async_fence(); + cutlass::arch::cp_async_wait<0>(); + __syncthreads(); + + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_sparse_base.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_sparse_base.h new file mode 100644 index 0000000000000000000000000000000000000000..9f82a7ff81b2c85d0a75e26ab1e6e259ea9d67f2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_sparse_base.h @@ -0,0 +1,273 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Template for a double-buffered threadblock-scoped GEMM kernel. 
+*/ + +#pragma once + +#include "cutlass/aligned_buffer.h" +#include "cutlass/arch/memory.h" +#include "cutlass/array.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_shape.h" +#include "cutlass/numeric_types.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +/// Policy object describing MmaTensorOp +template < + /// Warp-level GEMM operator (concept: gemm::warp::Mma) + typename Operator_, + /// Padding used for A operand in shared memory (concept: MatrixShape) + typename SmemPaddingA_, + /// Padding used for B operand in shared memory (concept: MatrixShape) + typename SmemPaddingB_, + /// Padding used for E operand in shared memory (concept: MatrixShape) + typename SmemPaddingE_, + /// Number of partitions of K dimension of GEMM + int PartitionsK = 1> +struct SparseMmaPolicy { + /// Warp-level GEMM operator (concept: gemm::warp::MmaTensorOp or gemm::warp::MmaSimt) + using Operator = Operator_; + + /// Padding used for A operand in shared memory + using SmemPaddingA = SmemPaddingA_; + + /// Padding used for B operand in shared memory + using SmemPaddingB = SmemPaddingB_; + + /// Padding used for B operand in shared memory + using SmemPaddingE = SmemPaddingE_; + + /// Number of partitions of K dimension + static int const kPartitionsK = PartitionsK; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Structure to compute the matrix product targeting CUDA cores and SIMT math +/// instructions. +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + /// Policy describing tuning details (concept: MmaPolicy) + typename Policy_, + /// Number of stages, + int Stages, + /// Used for partial specialization + typename Enable = bool> +class SparseMmaBase { + public: + ///< Size of the Gemm problem - concept: gemm::GemmShape<> + using Shape = Shape_; + + ///< Policy describing tuning details + using Policy = Policy_; + + // + // Dependent types + // + + /// Warp-level Mma + using Operator = typename Policy::Operator; + + /// Shape describing the overall GEMM computed from shared memory + /// by each warp. 
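// ---------------------------------------------------------------------------
// Illustrative sketch (editorial, not from this header): SparseMmaBase adds a
// third operand, E, holding structured-sparsity metadata.  In the common 2:4
// scheme (kSparse == 2) every group of four values along K keeps exactly two,
// and a 2-bit index per kept value records its original position; the kept
// values become the compressed A tile and the packed indices become E.  Plain
// C++ illustration of that compression; the names and the one-byte-per-group
// packing are assumptions chosen for the sketch.
#include <cstdint>
#include <vector>

struct Compressed24 {
  std::vector<float>   values;    // two kept values per group of four
  std::vector<uint8_t> metadata;  // two 2-bit positions packed per group
};

Compressed24 compress_2_4(std::vector<float> const &dense) {
  Compressed24 out;
  for (std::size_t g = 0; g + 4 <= dense.size(); g += 4) {
    std::uint8_t meta = 0;
    int kept = 0;
    for (int i = 0; i < 4 && kept < 2; ++i) {
      // Keep non-zeros; once only as many slots remain as values still needed,
      // keep unconditionally so each group contributes exactly two values.
      // (A genuinely 2:4-sparse input has at most two non-zeros per group.)
      bool must_keep = (4 - i) <= (2 - kept);
      if (dense[g + i] != 0.0f || must_keep) {
        out.values.push_back(dense[g + i]);
        meta |= static_cast<std::uint8_t>(i) << (2 * kept);
        ++kept;
      }
    }
    out.metadata.push_back(meta);
  }
  return out;
}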
+ using WarpGemm = typename Policy::Operator::Shape; + + /// Shape describing the number of warps filling the CTA + using WarpCount = GemmShape; + + /// Number of warp-level GEMM oeprations + static int const kWarpGemmIterations = + (WarpGemm::kK / Operator::Policy::MmaShape::kK); + + static_assert(kWarpGemmIterations > 1, + "The pipelined structure requires at least two warp-level " + "GEMM operations."); + + static_assert((kWarpGemmIterations % 2) == 0, + "Inner loop iteration must be an even number."); + + /// Number of stages + static int const kStages = Stages; + + static int const kSparse = Operator::kSparse; + + static int const kElementsPerElementE = Operator::kElementsPerElementE; + + /// Tensor reference to the A operand + using TensorRefA = TensorRef; + + /// Tensor reference to the B operand + using TensorRefB = TensorRef; + + /// Tensor reference to the E operand + using TensorRefE = TensorRef; + + // + // Nested structs + // + + /// Shared storage object needed by threadblock-scoped GEMM + class SharedStorage { + public: + // + // Type definitions + // + + /// Shape of the A matrix operand in shared memory + using ShapeA = MatrixShape; + + /// Shape of the B matrix operand in shared memory + using ShapeB = + MatrixShape; + + /// Shape of the E matrix operand in shared memory + using ShapeE = + MatrixShape; + + public: + // + // Data members + // + + /// Buffer for A operand + AlignedBuffer operand_A; + + /// Buffer for B operand + AlignedBuffer operand_B; + + /// Buffer for E operand + AlignedBuffer operand_E; + + public: + + // + // Methods + // + + /// Returns a layout object for the A matrix + CUTLASS_DEVICE + static typename Operator::LayoutA LayoutA() { + return Operator::LayoutA::packed({ShapeA::kRow, ShapeA::kColumn}); + } + + /// Returns a layout object for the B matrix + CUTLASS_HOST_DEVICE + static typename Operator::LayoutB LayoutB() { + return Operator::LayoutB::packed({ShapeB::kRow, ShapeB::kColumn}); + } + + /// Returns a layout object for the E matrix + CUTLASS_HOST_DEVICE + static typename Operator::LayoutE LayoutE() { + return Operator::LayoutE::packed({ShapeE::kRow, ShapeE::kColumn}); + } + + /// Returns a TensorRef to the A operand + CUTLASS_HOST_DEVICE + TensorRefA operand_A_ref() { + return TensorRefA{operand_A.data(), LayoutA()}; + } + + /// Returns a TensorRef to the B operand + CUTLASS_HOST_DEVICE + TensorRefB operand_B_ref() { + return TensorRefB{operand_B.data(), LayoutB()}; + } + + /// Returns a TensorRef to the E operand + CUTLASS_HOST_DEVICE + TensorRefE operand_E_ref() { + return TensorRefE{operand_E.data(), LayoutE()}; + } + }; + + protected: + + // + // Data members + // + + /// Iterator to load a warp-scoped tile of A operand from shared memory + typename Operator::IteratorA warp_tile_iterator_A_; + + /// Iterator to load a warp-scoped tile of B operand from shared memory + typename Operator::IteratorB warp_tile_iterator_B_; + + /// Iterator to load a warp-scoped tile of E operand from shared memory + typename Operator::IteratorE warp_tile_iterator_E_; + + +public: + + /// Construct from tensor references + CUTLASS_DEVICE + SparseMmaBase( + ///< Shared storage needed for internal use by threadblock-scoped GEMM + SharedStorage &shared_storage, + ///< ID within the threadblock + int thread_idx, + ///< ID of warp + int warp_idx, + ///< ID of each thread within a warp + int lane_idx + ): + warp_tile_iterator_A_(shared_storage.operand_A_ref(), lane_idx), + warp_tile_iterator_B_(shared_storage.operand_B_ref(), lane_idx), + 
warp_tile_iterator_E_(shared_storage.operand_E_ref(), lane_idx) { + + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_sparse_multistage.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_sparse_multistage.h new file mode 100644 index 0000000000000000000000000000000000000000..4b35e69603c82556e50e50a47fbde0f409d2a9f8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_sparse_multistage.h @@ -0,0 +1,668 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Template for a double-buffered threadblock-scoped GEMM kernel. +*/ + +#pragma once + +#include "cutlass/aligned_buffer.h" +#include "cutlass/arch/memory.h" +#include "cutlass/array.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_shape.h" +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/threadblock/mma_sparse_base.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace threadblock { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Structure to compute the matrix product targeting CUDA cores and SIMT math +/// instructions. 
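// ---------------------------------------------------------------------------
// Illustrative sketch (editorial, not from this header): the SharedStorage of
// the sparse base class keeps kStages-deep circular buffers for A (compressed
// by kSparse along K), B, and the small E metadata operand.  A
// back-of-the-envelope footprint check for one hypothetical fp16 tile
// configuration; every number below is an assumption, not a value read from
// this file.
#include <cstdio>

int main() {
  int const kM = 128, kN = 128, kK = 64;  // threadblock tile (illustrative)
  int const kStages = 3;
  int const kSparse = 2;                  // A stored with kK / 2 elements per row

  double bytes_A = kM * (kK / kSparse) * 2.0;  // fp16 compressed A, one stage
  double bytes_B = kK * kN * 2.0;              // fp16 B, one stage
  double bytes_E = kM * kK / 8.0;              // 2 bits per kept value, kK/2 kept per row

  double total = kStages * (bytes_A + bytes_B + bytes_E);
  std::printf("approx. shared memory per CTA: %.1f KiB\n", total / 1024.0);
  return 0;
}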
+template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + /// Iterates over tiles of A operand in global memory + // (concept: ReadableTileIterator | ForwardTileIterator | + // MaskedTileIterator) + typename IteratorA_, + /// Iterates over tiles of A operand in shared memory + /// (concept: WriteableTileIterator | RandomAccessTileIterator) + typename SmemIteratorA_, + /// Cache operation for operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Iterates over tiles of B operand in global memory + // (concept: ReadableTileIterator | ForwardTileIterator | + // MaskedTileIterator) + typename IteratorB_, + /// Iterates over tiles of B operand in shared memory + /// (concept: WriteableTileIterator | RandomAccessTileIterator) + typename SmemIteratorB_, + /// Cache operation for operand B + cutlass::arch::CacheOperation::Kind CacheOpB, + /// Data type of accumulator matrix + typename ElementC_, + /// Data type of accumulator matrix + typename LayoutC_, + /// Iterates over tiles of E operand in global memory + // (concept: ReadableTileIterator | ForwardTileIterator | + // MaskedTileIterator) + typename IteratorE_, + /// Iterates over tiles of E operand in shared memory + /// (concept: WriteableTileIterator | RandomAccessTileIterator) + typename SmemIteratorE_, + /// Cache operation for operand E + cutlass::arch::CacheOperation::Kind CacheOpE, + /// Policy describing tuning details (concept: MmaPolicy) + typename Policy_, + /// Number of stages, + int Stages, + /// Used for partial specialization + typename Enable = bool> +class SparseMmaMultistage : + public SparseMmaBase { +public: + ///< Base class + using Base = SparseMmaBase; + ///< Size of the Gemm problem - concept: gemm::GemmShape<> + using Shape = Shape_; + ///< Iterates over tiles of A operand in global memory + using IteratorA = IteratorA_; + ///< Iterates over tiles of B operand in global memory + using IteratorB = IteratorB_; + ///< Iterates over tiles of E operand in global memory + using IteratorE = IteratorE_; + ///< Data type of accumulator matrix + using ElementC = ElementC_; + ///< Layout of accumulator matrix + using LayoutC = LayoutC_; + ///< Policy describing tuning details + using Policy = Policy_; + + using SmemIteratorA = SmemIteratorA_; + using SmemIteratorB = SmemIteratorB_; + using SmemIteratorE = SmemIteratorE_; + + static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; + static cutlass::arch::CacheOperation::Kind const kCacheOpE = CacheOpE; + + static int const kSparse = Policy::Operator::kSparse; + static int const kMetaSizeInBits = Policy::Operator::kMetaSizeInBits; + static int const kMaxID2 = Policy::Operator::kMaxID2; + static int const kElementsPerElementE = + Policy::Operator::kElementsPerElementE; + + // + // Dependent types + // + + /// Fragment of accumulator tile + using FragmentC = typename Policy::Operator::FragmentC; + + /// Warp-level Mma + using Operator = typename Policy::Operator; + + /// ElementE + using ElementE = typename IteratorE::Element; + + /// LayoutE + using LayoutE = typename IteratorE::Layout; + + /// Minimum architecture is Sm80 to support cp.async + using ArchTag = arch::Sm80; + + /// Complex transform on A operand + static ComplexTransform const kTransformA = Operator::kTransformA; + + /// Complex transform on B operand + static ComplexTransform const kTransformB = Operator::kTransformB; + + /// Internal structure exposed for introspection. 
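// ---------------------------------------------------------------------------
// Illustrative sketch (editorial, not from this header): kElementsPerElementE
// expresses how many logical A elements one storage element of the E operand
// describes -- for instance a 16-bit metadata word can hold eight 2-bit source
// positions.  Packing and unpacking such a word in plain C++; the 16-bit width
// and 2-bit indices are assumptions chosen for the sketch.
#include <array>
#include <cstdint>

std::uint16_t pack_meta(std::array<std::uint8_t, 8> const &pos) {  // each pos in [0, 3]
  std::uint16_t meta = 0;
  for (int i = 0; i < 8; ++i) {
    meta |= static_cast<std::uint16_t>(pos[i] & 0x3u) << (2 * i);
  }
  return meta;
}

std::uint8_t unpack_meta(std::uint16_t meta, int i) {               // i in [0, 7]
  return static_cast<std::uint8_t>((meta >> (2 * i)) & 0x3u);
}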
+ struct Detail { + + /// Number of async copies to load one stage of operand A + static int const TBLoadIterationsA = + IteratorA::ThreadMap::Iterations::kCount; + + /// Number of async copies to load one stage of operand B + static int const TBLoadIterationsB = + IteratorB::ThreadMap::Iterations::kCount; + + /// Number of async copies to load one stage of operand E + static int const TBLoadIterationsE = + IteratorE::ThreadMap::Iterations::kCount; + + /// Number of stages + static int const kStages = Stages; + + /// Number of async copies to load one group of operand A + static int const kAccessesPerGroupA = + (TBLoadIterationsA + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations; + + /// Number of async copies to load one group of operand B + static int const kAccessesPerGroupB = + (TBLoadIterationsB + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations; + + /// Number of async copies to load one group of operand E + static int const kAccessesPerGroupE = + (TBLoadIterationsE + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations; + + /// E operand is tiny. For the most of time, not all the warps are needed + /// to load it from the global memory. + static int const kValidWarps = IteratorE::ThreadMap::kThreads / 32; + + /// B operand is twice as big as A which brings very high register pressure. + /// We have to sacrifice the double buffer when the warp tile size is big. + static int const kBBufferSize = + ((sizeof(typename Operator::ElementC) == 4) && + ((platform::is_same::value && + platform::is_same::value)) && + (Operator::Shape::kM >= 64 && Operator::Shape::kN >= 64)) + ? 1 + : 2; + }; + + private: + + using WarpLoadedFragmentA = typename Operator::FragmentA; + using WarpLoadedFragmentB = typename Operator::FragmentB; + using WarpTransformedFragmentA = typename Operator::TransformedFragmentA; + using WarpTransformedFragmentB = typename Operator::TransformedFragmentB; + using WarpFragmentE = typename Operator::FragmentE; + + private: + + // + // Data members + // + + /// Iterator to write threadblock-scoped tile of A operand to shared memory + SmemIteratorA smem_iterator_A_; + + /// Iterator to write threadblock-scoped tile of B operand to shared memory + SmemIteratorB smem_iterator_B_; + + /// Iterator to write threadblock-scoped tile of E operand to shared memory + SmemIteratorE smem_iterator_E_; + + /// Warp id + bool is_warp_valid_; + +public: + + /// Construct from tensor references + CUTLASS_DEVICE + SparseMmaMultistage( + ///< Shared storage needed for internal use by threadblock-scoped GEMM + typename Base::SharedStorage &shared_storage, + ///< ID within the threadblock + int thread_idx, + ///< ID of warp + int warp_idx, + ///< ID of each thread within a warp + int lane_idx + ): + Base(shared_storage, thread_idx, warp_idx, lane_idx), + smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx), + smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx), + smem_iterator_E_(shared_storage.operand_E_ref(), thread_idx) + { + is_warp_valid_ = warp_idx < Detail::kValidWarps; + + // Compute warp location within threadblock tile by mapping the warp_id to + // three coordinates: + // _m: the warp's position within the threadblock along the M dimension + // _n: the warp's position within the threadblock along the N dimension + // _k: the warp's position within the threadblock along the K dimension + + int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN); + int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN); + + int 
warp_idx_m = warp_idx_mn % Base::WarpCount::kM; + int warp_idx_n = warp_idx_mn / Base::WarpCount::kM; + + // Add per-warp offsets in units of warp-level tiles + this->warp_tile_iterator_A_.add_tile_offset( + {warp_idx_m, Base::kWarpGemmIterations * warp_idx_k}); + this->warp_tile_iterator_B_.add_tile_offset( + {Base::kWarpGemmIterations * warp_idx_k, warp_idx_n}); + this->warp_tile_iterator_E_.add_tile_offset( + {warp_idx_m, Base::kWarpGemmIterations * warp_idx_k}); + } + + CUTLASS_DEVICE + void copy_tiles_and_advance(IteratorA &iterator_A, IteratorB &iterator_B, + IteratorE &iterator_E, int group_start_A = 0, + int group_start_B = 0, int group_start_E = 0) { + iterator_A.set_iteration_index(group_start_A * + IteratorA::kAccessesPerVector); + this->smem_iterator_A_.set_iteration_index(group_start_A); + + // async copy for operand A + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::kAccessesPerGroupA; ++j) { + if (group_start_A + j < Detail::TBLoadIterationsA) { + typename IteratorA::AccessType *dst_ptr = + reinterpret_cast( + this->smem_iterator_A_.get()); + + int const kSrcBytes = sizeof_bits::value * + IteratorA::ThreadMap::kElementsPerAccess / + IteratorA::kAccessesPerVector / 8; + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) { + auto gmem_ptr = iterator_A.get(); + + cutlass::arch::cp_async( + dst_ptr + v, gmem_ptr, iterator_A.valid()); + + ++iterator_A; + } + + ++this->smem_iterator_A_; + } + } + + iterator_B.set_iteration_index(group_start_B * + IteratorB::kAccessesPerVector); + this->smem_iterator_B_.set_iteration_index(group_start_B); + + // async copy for operand B + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) { + if (group_start_B + j < Detail::TBLoadIterationsB) { + typename IteratorB::AccessType *dst_ptr = + reinterpret_cast( + this->smem_iterator_B_.get()); + + int const kSrcBytes = sizeof_bits::value * + IteratorB::ThreadMap::kElementsPerAccess / + IteratorB::kAccessesPerVector / 8; + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) { + auto gmem_ptr = iterator_B.get(); + + cutlass::arch::cp_async( + dst_ptr + v, gmem_ptr, iterator_B.valid()); + + ++iterator_B; + } + ++this->smem_iterator_B_; + } + } + + iterator_E.set_iteration_index(group_start_E); + this->smem_iterator_E_.set_iteration_index(group_start_E); + + // async copy for operand E + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::kAccessesPerGroupE; ++j) { + if (group_start_E + j < Detail::TBLoadIterationsE) { + typename IteratorE::AccessType *dst_ptr = + reinterpret_cast( + this->smem_iterator_E_.get()); + + int const kSrcBytes = sizeof_bits::value * + IteratorE::ThreadMap::kElementsPerAccess / 8; + + auto gmem_ptr = iterator_E.get(); + + cutlass::arch::cp_async( + dst_ptr, gmem_ptr, iterator_E.valid() && is_warp_valid_); + + ++iterator_E; + ++this->smem_iterator_E_; + } + } + } + + /// Perform a threadblock-scoped matrix multiply-accumulate + CUTLASS_DEVICE + void operator()( + ///< problem size of GEMM + int gemm_k_iterations, + ///< destination accumulator tile + FragmentC &accum, + ///< iterator over A operand in global memory + IteratorA iterator_A, + ///< iterator over B operand in global memory + IteratorB iterator_B, + ///< iterator over E operand in global memory + IteratorE iterator_E, + ///< initial value of accumulator + FragmentC const &src_accum) { + + // + // Prologue + // + + // Issue several complete stages + CUTLASS_PRAGMA_UNROLL + for (int stage = 0; stage < Base::kStages - 1; + ++stage, 
--gemm_k_iterations) { + + iterator_A.clear_mask(gemm_k_iterations == 0); + iterator_B.clear_mask(gemm_k_iterations == 0); + iterator_E.clear_mask(gemm_k_iterations == 0); + + iterator_A.set_iteration_index(0); + this->smem_iterator_A_.set_iteration_index(0); + + // async copy for operand A + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::TBLoadIterationsA; ++j) { + typename IteratorA::AccessType *dst_ptr = + reinterpret_cast( + this->smem_iterator_A_.get()); + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) { + int const kSrcBytes = + sizeof_bits::value * + IteratorA::ThreadMap::kElementsPerAccess / + IteratorA::kAccessesPerVector / 8; + + cutlass::arch::cp_async_zfill( + dst_ptr + v, iterator_A.get(), iterator_A.valid()); + + ++iterator_A; + } + + ++this->smem_iterator_A_; + } + + iterator_B.set_iteration_index(0); + this->smem_iterator_B_.set_iteration_index(0); + + // async copy for operand B + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::TBLoadIterationsB; ++j) { + typename IteratorB::AccessType *dst_ptr = + reinterpret_cast( + this->smem_iterator_B_.get()); + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) { + int const kSrcBytes = + sizeof_bits::value * + IteratorB::ThreadMap::kElementsPerAccess / + IteratorB::kAccessesPerVector / 8; + + cutlass::arch::cp_async_zfill( + dst_ptr + v, iterator_B.get(), iterator_B.valid()); + + ++iterator_B; + } + + ++this->smem_iterator_B_; + } + + iterator_E.set_iteration_index(0); + this->smem_iterator_E_.set_iteration_index(0); + + // async copy for operand E + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::TBLoadIterationsE; ++j) { + typename IteratorE::AccessType *dst_ptr = + reinterpret_cast( + this->smem_iterator_E_.get()); + + int const kSrcBytes = sizeof_bits::value * + IteratorE::ThreadMap::kElementsPerAccess / 8; + if (is_warp_valid_) + cutlass::arch::cp_async_zfill( + dst_ptr, iterator_E.get(), iterator_E.valid()); + + ++iterator_E; + + ++this->smem_iterator_E_; + } + + // Move to the next stage + iterator_A.add_tile_offset({0, 1}); + iterator_B.add_tile_offset({1, 0}); + iterator_E.add_tile_offset({0, 1}); + + this->smem_iterator_A_.add_tile_offset({0, 1}); + this->smem_iterator_B_.add_tile_offset({1, 0}); + this->smem_iterator_E_.add_tile_offset({0, 1}); + + // cp.async.commit_group - completes a stage + cutlass::arch::cp_async_fence(); + } + + // Perform accumulation in the 'd' output operand + accum = src_accum; + + cutlass::arch::cp_async_wait(); + __syncthreads(); + + // Pair of fragments used to overlap shared memory loads and math + // instructions + WarpLoadedFragmentA warp_loaded_frag_A[2]; + WarpLoadedFragmentB warp_loaded_frag_B[Detail::kBBufferSize]; + WarpTransformedFragmentA warp_transformed_frag_A[2]; + WarpTransformedFragmentB warp_transformed_frag_B[Detail::kBBufferSize]; + WarpFragmentE warp_frag_E[2]; + + Operator warp_mma; + + this->warp_tile_iterator_A_.set_kgroup_index(0); + this->warp_tile_iterator_B_.set_kgroup_index(0); + this->warp_tile_iterator_E_.set_kgroup_index(0); + + this->warp_tile_iterator_A_.load(warp_loaded_frag_A[0]); + this->warp_tile_iterator_B_.load(warp_loaded_frag_B[0]); + this->warp_tile_iterator_E_.load(warp_frag_E[0]); + + ++this->warp_tile_iterator_A_; + ++this->warp_tile_iterator_B_; + ++this->warp_tile_iterator_E_; + + iterator_A.clear_mask(gemm_k_iterations == 0); + iterator_B.clear_mask(gemm_k_iterations == 0); + iterator_E.clear_mask(gemm_k_iterations == 0); + + int smem_write_stage_idx = Base::kStages - 
1; + int smem_read_stage_idx = 0; + + warp_mma.transform(warp_transformed_frag_A[0], warp_transformed_frag_B[0], + warp_loaded_frag_A[0], warp_loaded_frag_B[0]); + + // + // Mainloop + // + + CUTLASS_GEMM_LOOP + for (; gemm_k_iterations > (-Base::kStages + 1);) { + // + // Loop over GEMM K dimension + // + + // Computes a warp-level GEMM on data held in shared memory + // Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate + CUTLASS_PRAGMA_UNROLL + for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; + ++warp_mma_k) { + + // Load warp-level tiles from shared memory, wrapping to k offset if + // this is the last group as the case may be. + + this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); + this->warp_tile_iterator_E_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); + + this->warp_tile_iterator_A_.load(warp_loaded_frag_A[(warp_mma_k + 1) % 2]); + this->warp_tile_iterator_E_.load(warp_frag_E[(warp_mma_k + 1) % 2]); + + ++this->warp_tile_iterator_A_; + ++this->warp_tile_iterator_E_; + + if (Detail::kBBufferSize == 2) { + this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); + this->warp_tile_iterator_B_.load( + warp_loaded_frag_B[(warp_mma_k + 1) % Detail::kBBufferSize]); + ++this->warp_tile_iterator_B_; + } + + if (warp_mma_k > 0) + warp_mma.transform(warp_transformed_frag_A[warp_mma_k % 2], + warp_transformed_frag_B[warp_mma_k % Detail::kBBufferSize], + warp_loaded_frag_A[warp_mma_k % 2], + warp_loaded_frag_B[warp_mma_k % Detail::kBBufferSize]); + + warp_mma( + accum, + warp_transformed_frag_A[warp_mma_k % 2], + warp_transformed_frag_B[warp_mma_k % Detail::kBBufferSize], accum, + warp_frag_E[warp_mma_k % 2] + ); + + if (Detail::kBBufferSize == 1) { + this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); + this->warp_tile_iterator_B_.load(warp_loaded_frag_B[0]); + ++this->warp_tile_iterator_B_; + + } + + // Issue global->shared copies for the this stage + if (warp_mma_k < Base::kWarpGemmIterations - 1) { + int group_start_iteration_A, group_start_iteration_B, group_start_iteration_E; + + group_start_iteration_A = warp_mma_k * Detail::kAccessesPerGroupA; + group_start_iteration_B = warp_mma_k * Detail::kAccessesPerGroupB; + group_start_iteration_E = warp_mma_k * Detail::kAccessesPerGroupE; + + copy_tiles_and_advance( + iterator_A, iterator_B, iterator_E, group_start_iteration_A, + group_start_iteration_B, group_start_iteration_E); + } + + if (warp_mma_k + 2 == Base::kWarpGemmIterations) { + int group_start_iteration_A, group_start_iteration_B, group_start_iteration_E; + group_start_iteration_A = + (warp_mma_k + 1) * Detail::kAccessesPerGroupA; + group_start_iteration_B = + (warp_mma_k + 1) * Detail::kAccessesPerGroupB; + group_start_iteration_E = + (warp_mma_k + 1) * Detail::kAccessesPerGroupE; + + copy_tiles_and_advance( + iterator_A, iterator_B, iterator_E, group_start_iteration_A, + group_start_iteration_B, group_start_iteration_E); + + // Inserts a memory fence between stages of cp.async instructions. + cutlass::arch::cp_async_fence(); + + // Waits until kStages-2 stages have committed. 
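// ---------------------------------------------------------------------------
// Illustrative sketch (editorial, not from this header): Detail::kBBufferSize
// used above trades pipelining for register pressure -- with 4-byte
// accumulators and a warp tile of at least 64x64 (the real predicate also
// inspects the operand element types), the B fragments fall back to a single
// buffer.  A stand-alone constexpr mirroring the shape of that compile-time
// decision; the simplified predicate and names are assumptions of the sketch.
template <int kSizeofOperand, int kSizeofAccum, int kWarpM, int kWarpN>
constexpr int b_buffer_size() {
  return (kSizeofAccum == 4 && kSizeofOperand == 2 && kWarpM >= 64 && kWarpN >= 64)
             ? 1   // sacrifice double buffering of B to keep register usage down
             : 2;  // default: double-buffered B fragments
}

static_assert(b_buffer_size<2, 4, 64, 64>() == 1, "large fp16 warp tile, fp32 accum");
static_assert(b_buffer_size<2, 2, 64, 64>() == 2, "fp16 accumulators keep both buffers");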
+ arch::cp_async_wait(); + __syncthreads(); + + // Move to the next stage + iterator_A.add_tile_offset({0, 1}); + iterator_B.add_tile_offset({1, 0}); + iterator_E.add_tile_offset({0, 1}); + + this->smem_iterator_A_.add_tile_offset({0, 1}); + this->smem_iterator_B_.add_tile_offset({1, 0}); + this->smem_iterator_E_.add_tile_offset({0, 1}); + + // Add negative offsets to return iterators to the 'start' of the + // circular buffer in shared memory + if (smem_write_stage_idx == (Base::kStages - 1)) { + this->smem_iterator_A_.add_tile_offset({0, -Base::kStages}); + this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0}); + this->smem_iterator_E_.add_tile_offset({0, -Base::kStages}); + smem_write_stage_idx = 0; + } else { + ++smem_write_stage_idx; + } + + if (smem_read_stage_idx == (Base::kStages - 1)) { + this->warp_tile_iterator_A_.add_tile_offset( + {0, -Base::kStages * Policy::kPartitionsK * + Base::kWarpGemmIterations}); + this->warp_tile_iterator_B_.add_tile_offset( + {-Base::kStages * Policy::kPartitionsK * + Base::kWarpGemmIterations, + 0}); + this->warp_tile_iterator_E_.add_tile_offset( + {0, -Base::kStages * Policy::kPartitionsK * + Base::kWarpGemmIterations}); + smem_read_stage_idx = 0; + } else { + ++smem_read_stage_idx; + } + + --gemm_k_iterations; + iterator_A.clear_mask(gemm_k_iterations == 0); + iterator_B.clear_mask(gemm_k_iterations == 0); + iterator_E.clear_mask(gemm_k_iterations == 0); + } + + // Do any conversions feeding the first stage at the end of the loop so + // we can start right away on mma instructions + if (warp_mma_k + 1 == Base::kWarpGemmIterations) + warp_mma.transform(warp_transformed_frag_A[(warp_mma_k + 1) % 2], + warp_transformed_frag_B[(warp_mma_k + 1) % 2], + warp_loaded_frag_A[(warp_mma_k + 1) % 2], + warp_loaded_frag_B[(warp_mma_k + 1) % 2]); + } + + } + + + // Commit and drain all pending and predicated cp.async pnz from the GEMM mainloop + cutlass::arch::cp_async_fence(); + cutlass::arch::cp_async_wait<0>(); + __syncthreads(); + + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_with_reduction_multistage.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_with_reduction_multistage.h new file mode 100644 index 0000000000000000000000000000000000000000..8586c3d9ee54d545b75fb286a6fd8d3e9950651c --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_with_reduction_multistage.h @@ -0,0 +1,545 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Template for a double-buffered threadblock-scoped GEMM kernel. +*/ + +#pragma once + +#include "cutlass/aligned_buffer.h" +#include "cutlass/arch/memory.h" +#include "cutlass/array.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_shape.h" +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/threadblock/mma_base.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace threadblock { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Structure to compute the matrix product targeting CUDA cores and SIMT math +/// instructions. 
+template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + /// Iterates over tiles of A operand in global memory + // (concept: ReadableTileIterator | ForwardTileIterator | + // MaskedTileIterator) + typename IteratorA_, + /// Iterates over tiles of A operand in shared memory + /// (concept: WriteableTileIterator | RandomAccessTileIterator) + typename SmemIteratorA_, + /// Cache operation for operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Iterates over tiles of B operand in global memory + // (concept: ReadableTileIterator | ForwardTileIterator | + // MaskedTileIterator) + typename IteratorB_, + /// Iterates over tiles of B operand in shared memory + /// (concept: WriteableTileIterator | RandomAccessTileIterator) + typename SmemIteratorB_, + /// Cache operation for operand B + cutlass::arch::CacheOperation::Kind CacheOpB, + /// Data type of accumulator matrix + typename ElementC_, + /// Data type of accumulator matrix + typename LayoutC_, + /// Policy describing tuning details (concept: MmaPolicy) + typename Policy_, + /// Number of stages, + int Stages, + /// Use zfill or predicate for out-of-bound cp.async + SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone, + /// Used for partial specialization + typename Enable = bool> +class MmaWithReductionMultistage : + public MmaBase { +public: + ///< Base class + using Base = MmaBase; + ///< Size of the Gemm problem - concept: gemm::GemmShape<> + using Shape = Shape_; + ///< Iterates over tiles of A operand in global memory + using IteratorA = IteratorA_; + ///< Iterates over tiles of B operand in global memory + using IteratorB = IteratorB_; + ///< Data type of accumulator matrix + using ElementC = ElementC_; + ///< Layout of accumulator matrix + using LayoutC = LayoutC_; + ///< Policy describing tuning details + using Policy = Policy_; + + using SmemIteratorA = SmemIteratorA_; + using SmemIteratorB = SmemIteratorB_; + + static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; + + // + // Dependent types + // + + /// Fragment of accumulator tile + using FragmentC = typename Policy::Operator::FragmentC; + + /// Warp-level Mma + using Operator = typename Policy::Operator; + + using FragmentReduction = typename Operator::FragmentReduction; + + /// Minimum architecture is Sm80 to support cp.async + using ArchTag = arch::Sm80; + + /// Complex transform on A operand + static ComplexTransform const kTransformA = Operator::kTransformA; + + /// Complex transform on B operand + static ComplexTransform const kTransformB = Operator::kTransformB; + + static int const kReduceKForA = Operator::kReduceKForA; + + /// Internal structure exposed for introspection. 
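// ---------------------------------------------------------------------------
// Illustrative sketch (editorial, not from this header): the "with reduction"
// mainloop produces, alongside the usual GEMM accumulators, a running
// reduction over the K dimension of one operand (selected by kReduceKForA and
// accumulated into gemm_k_reduction_accum).  A scalar reference of that
// contract in plain C++, reducing A over K; the function name and row-major
// layouts are assumptions of the sketch.
#include <vector>

void gemm_with_k_reduction(int M, int N, int K,
                           std::vector<float> const &A,      // M x K, row-major
                           std::vector<float> const &B,      // K x N, row-major
                           std::vector<float> &C,            // M x N, row-major
                           std::vector<float> &k_reduction)  // length M
{
  for (int m = 0; m < M; ++m) {
    // K-reduction of operand A for this row, computed alongside the GEMM.
    k_reduction[m] = 0.0f;
    for (int k = 0; k < K; ++k) {
      k_reduction[m] += A[m * K + k];
    }
    for (int n = 0; n < N; ++n) {
      float acc = 0.0f;
      for (int k = 0; k < K; ++k) {
        acc += A[m * K + k] * B[k * N + n];
      }
      C[m * N + n] = acc;
    }
  }
}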
+ struct Detail { + + /// Number of cp.async instructions to load one stage of operand A + static int const AsyncCopyIterationsPerStageA = + IteratorA::ThreadMap::Iterations::kCount; + + /// Number of cp.async instructions to load one stage of operand B + static int const AsyncCopyIterationsPerStageB = + IteratorB::ThreadMap::Iterations::kCount; + + /// Number of stages + static int const kStages = Stages; + + /// Number of cp.async instructions to load on group of operand A + static int const kAccessesPerGroupA = + (AsyncCopyIterationsPerStageA + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations; + + /// Number of cp.async instructions to load on group of operand B + static int const kAccessesPerGroupB = + (AsyncCopyIterationsPerStageB + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations; + }; + + private: + + using WarpLoadedFragmentA = typename Operator::FragmentA; + using WarpLoadedFragmentB = typename Operator::FragmentB; + using WarpTransformedFragmentA = typename Operator::TransformedFragmentA; + using WarpTransformedFragmentB = typename Operator::TransformedFragmentB; + + private: + + // + // Data members + // + + /// Iterator to write threadblock-scoped tile of A operand to shared memory + SmemIteratorA smem_iterator_A_; + + /// Iterator to write threadblock-scoped tile of B operand to shared memory + SmemIteratorB smem_iterator_B_; + +public: + + /// Construct from tensor references + CUTLASS_DEVICE + MmaWithReductionMultistage( + ///< Shared storage needed for internal use by threadblock-scoped GEMM + typename Base::SharedStorage &shared_storage, + ///< ID within the threadblock + int thread_idx, + ///< ID of warp + int warp_idx, + ///< ID of each thread within a warp + int lane_idx + ): + Base(shared_storage, thread_idx, warp_idx, lane_idx), + smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx), + smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx) + { + // Compute warp location within threadblock tile by mapping the warp_id to + // three coordinates: + // _m: the warp's position within the threadblock along the M dimension + // _n: the warp's position within the threadblock along the N dimension + // _k: the warp's position within the threadblock along the K dimension + + int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN); + int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN); + + int warp_idx_m = warp_idx_mn % Base::WarpCount::kM; + int warp_idx_n = warp_idx_mn / Base::WarpCount::kM; + + // Add per-warp offsets in units of warp-level tiles + this->warp_tile_iterator_A_.add_tile_offset( + {warp_idx_m, Base::kWarpGemmIterations * warp_idx_k}); + this->warp_tile_iterator_B_.add_tile_offset( + {Base::kWarpGemmIterations * warp_idx_k, warp_idx_n}); + } + + CUTLASS_DEVICE + void copy_tiles_and_advance(IteratorA &iterator_A, IteratorB &iterator_B, + int group_start_A = 0, int group_start_B = 0) { + iterator_A.set_iteration_index(group_start_A * + IteratorA::kAccessesPerVector); + this->smem_iterator_A_.set_iteration_index(group_start_A); + + // Async Copy for operand A + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::kAccessesPerGroupA; ++j) { + if (group_start_A + j < Detail::AsyncCopyIterationsPerStageA) { + typename IteratorA::AccessType *dst_ptr = + reinterpret_cast( + this->smem_iterator_A_.get()); + + int const kSrcBytes = sizeof_bits::value * + IteratorA::ThreadMap::kElementsPerAccess / + IteratorA::kAccessesPerVector / 8; + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < 
IteratorA::kAccessesPerVector; ++v) { + auto gmem_ptr = iterator_A.get(); + + if (SharedMemoryClear == SharedMemoryClearOption::kZfill) { + cutlass::arch::cp_async_zfill( + dst_ptr + v, gmem_ptr, iterator_A.valid()); + } else { + cutlass::arch::cp_async( + dst_ptr + v, gmem_ptr, iterator_A.valid()); + } + + ++iterator_A; + } + + ++this->smem_iterator_A_; + } + } + + iterator_B.set_iteration_index(group_start_B * + IteratorB::kAccessesPerVector); + this->smem_iterator_B_.set_iteration_index(group_start_B); + + // Async Copy for operand B + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) { + if (group_start_B + j < Detail::AsyncCopyIterationsPerStageB) { + typename IteratorB::AccessType *dst_ptr = + reinterpret_cast( + this->smem_iterator_B_.get()); + + int const kSrcBytes = sizeof_bits::value * + IteratorB::ThreadMap::kElementsPerAccess / + IteratorB::kAccessesPerVector / 8; + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) { + auto gmem_ptr = iterator_B.get(); + + if (SharedMemoryClear == SharedMemoryClearOption::kZfill) { + cutlass::arch::cp_async_zfill( + dst_ptr + v, gmem_ptr, iterator_B.valid()); + } else { + cutlass::arch::cp_async( + dst_ptr + v, gmem_ptr, iterator_B.valid()); + } + + ++iterator_B; + } + ++this->smem_iterator_B_; + } + } + } + + /// Perform a threadblock-scoped matrix multiply-accumulate + CUTLASS_DEVICE + void operator()( + ///< problem size of GEMM + int gemm_k_iterations, + ///< destination accumulator tile + FragmentC &accum, + ///< iterator over A operand in global memory + IteratorA iterator_A, + ///< iterator over B operand in global memory + IteratorB iterator_B, + ///< initial value of accumulator + FragmentC const &src_accum, + FragmentReduction &gemm_k_reduction_accum) { + + // + // Prologue + // + // Issue several complete stages + + CUTLASS_PRAGMA_UNROLL + for (int stage = 0; stage < Base::kStages - 1; + ++stage, --gemm_k_iterations) { + + iterator_A.clear_mask(gemm_k_iterations == 0); + iterator_B.clear_mask(gemm_k_iterations == 0); + + iterator_A.set_iteration_index(0); + this->smem_iterator_A_.set_iteration_index(0); + + // Async Copy for operand A + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) { + typename IteratorA::AccessType *dst_ptr = + reinterpret_cast( + this->smem_iterator_A_.get()); + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) { + int const kSrcBytes = + sizeof_bits::value * + IteratorA::ThreadMap::kElementsPerAccess / + IteratorA::kAccessesPerVector / 8; + + int src_bytes = (iterator_A.valid() ? 
kSrcBytes : 0); + + cutlass::arch::cp_async_zfill( + dst_ptr + v, iterator_A.get(), iterator_A.valid()); + + ++iterator_A; + } + + ++this->smem_iterator_A_; + } + + iterator_B.set_iteration_index(0); + this->smem_iterator_B_.set_iteration_index(0); + + // Async Copy for operand B + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) { + typename IteratorB::AccessType *dst_ptr = + reinterpret_cast( + this->smem_iterator_B_.get()); + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) { + int const kSrcBytes = + sizeof_bits::value * + IteratorB::ThreadMap::kElementsPerAccess / + IteratorB::kAccessesPerVector / 8; + + cutlass::arch::cp_async_zfill( + dst_ptr + v, iterator_B.get(), iterator_B.valid()); + + ++iterator_B; + } + + ++this->smem_iterator_B_; + } + + // Move to the next stage + iterator_A.add_tile_offset({0, 1}); + iterator_B.add_tile_offset({1, 0}); + + this->smem_iterator_A_.add_tile_offset({0, 1}); + this->smem_iterator_B_.add_tile_offset({1, 0}); + + // Defines the boundary of a stage of cp.async. + cutlass::arch::cp_async_fence(); + } + + // Perform accumulation in the 'd' output operand + accum = src_accum; + + // Waits until kStages-2 stages have committed. + cutlass::arch::cp_async_wait(); + __syncthreads(); + + // Pair of fragments used to overlap shared memory loads and math + // instructions + WarpLoadedFragmentA warp_loaded_frag_A[2]; + WarpLoadedFragmentB warp_loaded_frag_B[2]; + WarpTransformedFragmentA warp_transformed_frag_A[2]; + WarpTransformedFragmentB warp_transformed_frag_B[2]; + + Operator warp_mma; + + this->warp_tile_iterator_A_.set_kgroup_index(0); + this->warp_tile_iterator_B_.set_kgroup_index(0); + + this->warp_tile_iterator_A_.load(warp_loaded_frag_A[0]); + this->warp_tile_iterator_B_.load(warp_loaded_frag_B[0]); + + ++this->warp_tile_iterator_A_; + ++this->warp_tile_iterator_B_; + + iterator_A.clear_mask(gemm_k_iterations == 0); + iterator_B.clear_mask(gemm_k_iterations == 0); + + int smem_write_stage_idx = Base::kStages - 1; + int smem_read_stage_idx = 0; + + warp_mma.transform(warp_transformed_frag_A[0], warp_transformed_frag_B[0], + warp_loaded_frag_A[0], warp_loaded_frag_B[0]); + + // + // Mainloop + // + + CUTLASS_GEMM_LOOP + for (; gemm_k_iterations > (-Base::kStages + 1);) { + // + // Loop over GEMM K dimension + // + + // Computes a warp-level GEMM on data held in shared memory + // Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate + CUTLASS_PRAGMA_UNROLL + for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; + ++warp_mma_k) { + + // Load warp-level tiles from shared memory, wrapping to k offset if + // this is the last group as the case may be. 
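+        // The two-entry fragment arrays act as a ping-pong buffer: the MMA below consumes
+        // fragment [warp_mma_k % 2] while these loads prefetch the next k-group into
+        // fragment [(warp_mma_k + 1) % 2], overlapping shared-memory loads with math.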
+ + this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); + this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); + + this->warp_tile_iterator_A_.load(warp_loaded_frag_A[(warp_mma_k + 1) % 2]); + this->warp_tile_iterator_B_.load(warp_loaded_frag_B[(warp_mma_k + 1) % 2]); + + ++this->warp_tile_iterator_A_; + ++this->warp_tile_iterator_B_; + + if (warp_mma_k > 0) + warp_mma.transform(warp_transformed_frag_A[warp_mma_k % 2], + warp_transformed_frag_B[warp_mma_k % 2], + warp_loaded_frag_A[warp_mma_k % 2], + warp_loaded_frag_B[warp_mma_k % 2]); + + warp_mma( + accum, + warp_transformed_frag_A[warp_mma_k % 2], + warp_transformed_frag_B[warp_mma_k % 2], + accum, + gemm_k_reduction_accum + ); + + // Issue global->shared copies for the this stage + if (warp_mma_k < Base::kWarpGemmIterations - 1) { + int group_start_iteration_A, group_start_iteration_B; + + group_start_iteration_A = warp_mma_k * Detail::kAccessesPerGroupA; + group_start_iteration_B = warp_mma_k * Detail::kAccessesPerGroupB; + + copy_tiles_and_advance(iterator_A, iterator_B, group_start_iteration_A, + group_start_iteration_B); + } + + if (warp_mma_k + 2 == Base::kWarpGemmIterations) { + int group_start_iteration_A, group_start_iteration_B; + group_start_iteration_A = + (warp_mma_k + 1) * Detail::kAccessesPerGroupA; + group_start_iteration_B = + (warp_mma_k + 1) * Detail::kAccessesPerGroupB; + + copy_tiles_and_advance(iterator_A, iterator_B, group_start_iteration_A, + group_start_iteration_B); + + // Inserts a memory fence between stages of cp.async instructions. + cutlass::arch::cp_async_fence(); + + // Waits until kStages-2 stages have committed. + arch::cp_async_wait(); + __syncthreads(); + + // Move to the next stage + iterator_A.add_tile_offset({0, 1}); + iterator_B.add_tile_offset({1, 0}); + + this->smem_iterator_A_.add_tile_offset({0, 1}); + this->smem_iterator_B_.add_tile_offset({1, 0}); + + // Add negative offsets to return iterators to the 'start' of the + // circular buffer in shared memory + if (smem_write_stage_idx == (Base::kStages - 1)) { + this->smem_iterator_A_.add_tile_offset({0, -Base::kStages}); + this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0}); + smem_write_stage_idx = 0; + } else { + ++smem_write_stage_idx; + } + + if (smem_read_stage_idx == (Base::kStages - 1)) { + this->warp_tile_iterator_A_.add_tile_offset( + {0, -Base::kStages * Policy::kPartitionsK * + Base::kWarpGemmIterations}); + this->warp_tile_iterator_B_.add_tile_offset( + {-Base::kStages * Policy::kPartitionsK * + Base::kWarpGemmIterations, + 0}); + smem_read_stage_idx = 0; + } else { + ++smem_read_stage_idx; + } + + --gemm_k_iterations; + iterator_A.clear_mask(gemm_k_iterations == 0); + iterator_B.clear_mask(gemm_k_iterations == 0); + } + + // Do any conversions feeding the first stage at the end of the loop so + // we can start right away on mma instructions + if (warp_mma_k + 1 == Base::kWarpGemmIterations) + warp_mma.transform(warp_transformed_frag_A[(warp_mma_k + 1) % 2], + warp_transformed_frag_B[(warp_mma_k + 1) % 2], + warp_loaded_frag_A[(warp_mma_k + 1) % 2], + warp_loaded_frag_B[(warp_mma_k + 1) % 2]); + } + + } + + // commit and drain all pending and predicated cp.async pnz from the GEMM mainloop + cutlass::arch::cp_async_fence(); + cutlass::arch::cp_async_wait<0>(); + __syncthreads(); + + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // 
namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/threadblock_swizzle.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/threadblock_swizzle.h new file mode 100644 index 0000000000000000000000000000000000000000..dd9ce782b358c7607b425e91c972c61d31515ba9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/threadblock_swizzle.h @@ -0,0 +1,459 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Implements several possible threadblock-swizzling functions mapping blockIdx to + GEMM problems. 
+*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/layout/matrix.h" +#include "cutlass/platform/platform.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/conv/conv2d_problem_size.h" +#include "cutlass/conv/conv3d_problem_size.h" +#include "cutlass/gemm/threadblock/index_remat.h" +#include "cutlass/gemm/threadblock/threadblock_swizzle_streamk.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace threadblock { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Threadblock swizzling function for GEMMs +template +struct GemmIdentityThreadblockSwizzle { + + CUTLASS_HOST_DEVICE + GemmIdentityThreadblockSwizzle() { } + + /// Returns the shape of the problem in units of logical tiles + /// *Gemm* problem size: gemm(M, N, K) + CUTLASS_HOST_DEVICE + static GemmCoord get_tiled_shape( + GemmCoord problem_size, + GemmCoord tile_size, + int split_k_slices) { + + return GemmCoord( + (problem_size.m() + tile_size.m() - 1) / tile_size.m(), + (problem_size.n() + tile_size.n() - 1) / tile_size.n(), + split_k_slices); + } + + /// Returns the shape of the problem in units of logical tiles + /// *ImplicitGemm* Conv2d problem size: conv_operator(NPQK, NHWC, KRSC) + CUTLASS_HOST_DEVICE + static GemmCoord get_tiled_shape( + cutlass::conv::Operator conv_operator, + cutlass::conv::Conv2dProblemSize const &problem_size, + GemmCoord tile_size, + int split_k_slices) { + + gemm::GemmCoord implicit_gemm_problem_size = + cutlass::conv::implicit_gemm_problem_size(conv_operator, problem_size); + + return get_tiled_shape( + implicit_gemm_problem_size, tile_size, split_k_slices); + } + + /// Returns the shape of the problem in units of logical tiles + /// *ImplicitGemm* Conv3d problem size: conv_operator(NZPQK, NDHWC, KTRSC) + CUTLASS_HOST_DEVICE + static GemmCoord get_tiled_shape( + cutlass::conv::Operator conv_operator, + cutlass::conv::Conv3dProblemSize const &problem_size, + GemmCoord tile_size, + int split_k_slices) { + + gemm::GemmCoord implicit_gemm_problem_size = + cutlass::conv::implicit_gemm_problem_size(conv_operator, problem_size); + + return get_tiled_shape( + implicit_gemm_problem_size, tile_size, split_k_slices); + } + + /// Computes CUDA grid dimensions given a size in units of logical tiles + CUTLASS_HOST_DEVICE + static dim3 get_grid_shape(GemmCoord tiled_shape) { + int tile = 1 << get_log_tile(tiled_shape); + return dim3(tiled_shape.m() * tile, (tiled_shape.n() + tile - 1) / tile, tiled_shape.k()); + } + + /// Calculates optimal swizzle width + CUTLASS_HOST_DEVICE + static int get_log_tile(GemmCoord tiled_shape) { + auto n = tiled_shape.n(); + // Thresholds picked so that it doesn't cause too many no-op CTAs + if (N >= 8 && n >= 6) + return 3; + else if (N >= 4 && n >= 3) + return 2; + else if (N >= 2 && n >= 2) + return 1; + else + return 0; + } + + /// Obtains the threadblock offset (in units of threadblock-scoped tiles) + CUTLASS_DEVICE + static GemmCoord get_tile_offset(int log_tile) { + int block_idx_x = RematerializeBlockIdxX(); + int block_idx_y = RematerializeBlockIdxY(); + int block_idx_z = RematerializeBlockIdxZ(); + + return GemmCoord{(block_idx_x >> log_tile), // + (block_idx_y << log_tile) + ((block_idx_x) & ((1 << (log_tile)) - 1)), + block_idx_z}; + } + + /// Obtains the threadblock offset (in units of threadblock-scoped tiles) + CUTLASS_DEVICE + static GemmCoord get_tile_offset(GemmCoord tiled_shape) { + + int 
const kTile = N; + int block_idx_x = RematerializeBlockIdxX(); + int block_idx_y = RematerializeBlockIdxY(); + + if ((tiled_shape.m() < kTile) || (tiled_shape.n() < kTile)) + return GemmCoord{block_idx_x, block_idx_y, RematerializeBlockIdxZ()}; + + return GemmCoord{ + (block_idx_x / kTile), + (block_idx_y * kTile) + (block_idx_x % kTile), + RematerializeBlockIdxZ() + }; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Threadblock swizzling function for GEMMs +struct GemmHorizontalThreadblockSwizzle { + + CUTLASS_HOST_DEVICE + GemmHorizontalThreadblockSwizzle() { } + + /// Returns the shape of the problem in units of logical tiles + CUTLASS_HOST_DEVICE + static GemmCoord get_tiled_shape( + GemmCoord problem_size, + GemmCoord tile_size, + int split_k_slices) { + + return GemmCoord( + (problem_size.m() + tile_size.m() - 1) / tile_size.m(), + (problem_size.n() + tile_size.n() - 1) / tile_size.n(), + split_k_slices); + } + + /// Computes CUDA grid dimensions given a size in units of logical tiles + CUTLASS_HOST_DEVICE + static dim3 get_grid_shape(GemmCoord tiled_shape) { + return dim3(tiled_shape.n(), tiled_shape.m(), tiled_shape.k()); + } + + /// Calculates optimal swizzle width + CUTLASS_HOST_DEVICE + static int get_log_tile(GemmCoord tiled_shape) { + return 0; + } + + /// Obtains the threadblock offset (in units of threadblock-scoped tiles) + CUTLASS_DEVICE + static GemmCoord get_tile_offset(GemmCoord tiled_shape) { + return GemmCoord{ + RematerializeBlockIdxY(), + RematerializeBlockIdxX(), + RematerializeBlockIdxZ() + }; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Threadblock swizzling function for batched GEMMs +struct GemmBatchedIdentityThreadblockSwizzle { + + /// Returns the shape of the problem in units of logical tiles + CUTLASS_HOST_DEVICE + static GemmCoord get_tiled_shape( + GemmCoord problem_size, + GemmCoord tile_size, + int batch_count) { + + return GemmCoord( + (problem_size.m() + tile_size.m() - 1) / tile_size.m(), + (problem_size.n() + tile_size.n() - 1) / tile_size.n(), + batch_count % (1 << 16)); + } + + /// Computes CUDA grid dimensions given a size in units of logical tiles + CUTLASS_HOST_DEVICE + static dim3 get_grid_shape(GemmCoord tiled_shape) { + return dim3(tiled_shape.m(), tiled_shape.n(), tiled_shape.k()); + } + + /// Calculates optimal swizzle width + CUTLASS_HOST_DEVICE + static int get_log_tile(GemmCoord tiled_shape) { + return 0; + } + + /// Obtains the threadblock offset (in units of threadblock-scoped tiles) + CUTLASS_DEVICE + static GemmCoord get_tile_offset(GemmCoord tiled_shape) { + return GemmCoord{ + RematerializeBlockIdxX(), + RematerializeBlockIdxY(), + RematerializeBlockIdxZ() + }; + } + + /// Obtains the threadblock offset (in units of threadblock-scoped tiles) + CUTLASS_DEVICE + static GemmCoord get_tile_offset(int log_tile) { + int block_idx_x = RematerializeBlockIdxX(); + int block_idx_y = RematerializeBlockIdxY(); + int block_idx_z = RematerializeBlockIdxZ(); + + return GemmCoord{(block_idx_x >> log_tile), // + (block_idx_y << log_tile) + ((block_idx_x) & ((1 << (log_tile)) - 1)), + block_idx_z}; + } + + /// Gets the batch index + CUTLASS_DEVICE + static int get_batch_idx() { + return RematerializeBlockIdxZ(); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Threadblock swizzling function for split-K GEMMs +template +struct 
GemmSplitKIdentityThreadblockSwizzle { + + int const kTile = N; + + /// Returns the shape of the problem in units of logical tiles + CUTLASS_HOST_DEVICE + static GemmCoord get_tiled_shape( + GemmCoord problem_size, + GemmCoord tile_size, + int partitions) { + + return GemmCoord( + (problem_size.m() + tile_size.m() - 1) / tile_size.m(), + (problem_size.n() + tile_size.n() - 1) / tile_size.n(), + partitions); + } + + /// Calculates optimal swizzle width + CUTLASS_HOST_DEVICE + static int get_log_tile(GemmCoord tiled_shape) { + auto n = tiled_shape.n(); + // Thresholds picked so that it doesn't cause too many no-op CTAs + if (N >= 8 && n >= 6) + return 3; + else if (N >= 4 && n >= 3) + return 2; + else if (N >= 2 && n >= 2) + return 1; + else + return 0; + } + + /// Computes CUDA grid dimensions given a size in units of logical tiles + CUTLASS_HOST_DEVICE + static dim3 get_grid_shape(GemmCoord tiled_shape) { + int tile = 1 << get_log_tile(tiled_shape); + return dim3(tiled_shape.m() * tile, (tiled_shape.n() + tile - 1) / tile, tiled_shape.k()); + } + + /// Obtains the threadblock offset (in units of threadblock-scoped tiles) + CUTLASS_DEVICE + static GemmCoord get_tile_offset(int log_tile) { + int block_idx_x = RematerializeBlockIdxX(); + int block_idx_y = RematerializeBlockIdxY(); + int block_idx_z = RematerializeBlockIdxZ(); + + return GemmCoord{(block_idx_x >> log_tile), // + (block_idx_y << log_tile) + ((block_idx_x) & ((1 << (log_tile)) - 1)), + block_idx_z}; + } + + /// Obtains the threadblock offset (in units of threadblock-scoped tiles) + CUTLASS_DEVICE + static GemmCoord get_tile_offset(GemmCoord tiled_shape) { + + int const kTile = N; + int block_idx_x = RematerializeBlockIdxX(); + int block_idx_y = RematerializeBlockIdxY(); + + if ((tiled_shape.m() < kTile) || (tiled_shape.n() < kTile)) + return GemmCoord{block_idx_x, block_idx_y, RematerializeBlockIdxZ()}; + + return GemmCoord{ + (block_idx_x / kTile), + (block_idx_y * kTile) + (block_idx_x % kTile), + RematerializeBlockIdxZ() + }; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Threadblock swizzling function for split-K GEMMs +struct GemmSplitKHorizontalThreadblockSwizzle { + + /// Returns the shape of the problem in units of logical tiles + CUTLASS_HOST_DEVICE + static GemmCoord get_tiled_shape( + GemmCoord problem_size, + GemmCoord tile_size, + int partitions) { + + return GemmCoord( + (problem_size.m() + tile_size.m() - 1) / tile_size.m(), + (problem_size.n() + tile_size.n() - 1) / tile_size.n(), + partitions); + } + + /// Computes CUDA grid dimensions given a size in units of logical tiles + CUTLASS_HOST_DEVICE + static dim3 get_grid_shape(GemmCoord tiled_shape) { + return dim3(tiled_shape.n(), tiled_shape.m(), tiled_shape.k()); + } + + /// Calculates optimal swizzle width + CUTLASS_HOST_DEVICE + static int get_log_tile(GemmCoord tiled_shape) { + return 0; + } + + /// Obtains the threadblock offset (in units of threadblock-scoped tiles) + CUTLASS_DEVICE + static GemmCoord get_tile_offset(int log_tile) { + return GemmCoord{ + RematerializeBlockIdxY(), + RematerializeBlockIdxX(), + RematerializeBlockIdxZ() + }; + } + + /// Obtains the threadblock offset (in units of threadblock-scoped tiles) + CUTLASS_DEVICE + static GemmCoord get_tile_offset(GemmCoord tiled_shape) { + return GemmCoord{ + RematerializeBlockIdxY(), + RematerializeBlockIdxX(), + RematerializeBlockIdxZ() + }; + } +}; + 
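+/////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Usage sketch (editorial illustration only, not part of the original header): host-side launch
+// code typically drives the swizzles above as shown below. The 128x128x32 threadblock tile and
+// the identity-swizzle width of 8 are assumed values for this example.
+//
+//   using Swizzle = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>;
+//
+//   cutlass::gemm::GemmCoord problem_size(M, N, K);
+//   cutlass::gemm::GemmCoord tile_size(128, 128, 32);
+//
+//   cutlass::gemm::GemmCoord tiled_shape =
+//       Swizzle::get_tiled_shape(problem_size, tile_size, /*split_k_slices=*/1);
+//   dim3 grid = Swizzle::get_grid_shape(tiled_shape);          // CTAs to launch
+//
+//   // Device side, each CTA recovers its logical tile coordinate from blockIdx:
+//   //   int log_tile = Swizzle::get_log_tile(tiled_shape);
+//   //   cutlass::gemm::GemmCoord tb_offset = Swizzle::get_tile_offset(log_tile);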
+///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Threadblock swizzling function for batched GEMVs +struct GemvBatchedStridedThreadblockDefaultSwizzle { + + /// Returns the shape of the problem in units of logical tiles + CUTLASS_HOST_DEVICE + static BatchedGemmCoord get_tiled_shape( + BatchedGemmCoord problem_size, + BatchedGemmCoord tile_size) { + + return BatchedGemmCoord( + 1, // M is always 1 + (problem_size.n() + tile_size.n() - 1) / tile_size.n(), + (problem_size.k() + tile_size.k() - 1) / tile_size.k(), + (problem_size.batch() + tile_size.batch() - 1) / tile_size.batch()); + } + + /// Computes CUDA grid dimensions given a size in units of logical tiles + CUTLASS_HOST_DEVICE + static dim3 get_grid_shape(BatchedGemmCoord tiled_shape) { + return dim3(tiled_shape.n(), tiled_shape.batch(), tiled_shape.k()); + } + + /// Calculates optimal swizzle width + CUTLASS_HOST_DEVICE + static int get_log_tile(GemmCoord tiled_shape) { + return 0; + } + + /// Obtains the threadblock offset (in units of threadblock-scoped tiles) + CUTLASS_DEVICE + static BatchedGemmCoord get_tile_offset(int log_tile) { + return BatchedGemmCoord{ + 0, // M is always 1 + RematerializeBlockIdxX(), + RematerializeBlockIdxZ(), + RematerializeBlockIdxY(), + }; + } + + /// Obtains the threadblock offset (in units of threadblock-scoped tiles) + CUTLASS_DEVICE + static BatchedGemmCoord get_tile_offset() { + return BatchedGemmCoord{ + 0, // M is always 1 + RematerializeBlockIdxX(), + RematerializeBlockIdxZ(), + RematerializeBlockIdxY(), + }; + } + + /// Gets the batch tile index + CUTLASS_DEVICE + static int get_batch_tile_idx() { + return RematerializeBlockIdxY(); + } + + /// Gets the absolute batch index + CUTLASS_DEVICE + static int get_batch_idx() { + return RematerializeBlockDimY()*RematerializeBlockIdxY() + RematerializeThreadIdxY(); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/threadblock_swizzle_streamk.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/threadblock_swizzle_streamk.h new file mode 100644 index 0000000000000000000000000000000000000000..196fe1a37adeb66d5d06a10fd3621fc0d849fa00 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/threadblock/threadblock_swizzle_streamk.h @@ -0,0 +1,811 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Implements streamk threadblock mapping blockIdx to GEMM problems. +*/ + +/* + Note: CUTLASS 3x increases the host compiler requirements to C++17. However, certain + existing integrations of CUTLASS require C++11 host compilers. + + Until this requirement can be lifted, certain headers with this annotation are required + to be remain consistent with C++11 syntax. + + C++11 compatibility is enforced by `cutlass_test_unit_core_cpp11`. +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/fast_math.h" +#include "cutlass/layout/matrix.h" +#include "cutlass/platform/platform.h" +#include "cutlass/gemm/gemm_enumerated_types.h" +#include "cutlass/conv/conv2d_problem_size.h" +#include "cutlass/conv/conv3d_problem_size.h" +#include "cutlass/gemm/threadblock/index_remat.h" + +#if !defined(__CUDACC_RTC__) +#include +#include "cutlass/core_io.h" +#include "cutlass/trace.h" +#endif + + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace threadblock { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Threadblock mapping control for GEMMs +struct ThreadblockSwizzleStreamK { + + /// Advertise StreamkFeature + using StreamkFeature = void; + + + /// Kernel traits + template + struct KernelTraits {}; + + + /// Reduction strategy + enum ReductionStrategy + { + kNone, // Data-parallel strategy (no seams, fixup, etc.) 
+ + kAtomic, // Non-deterministic reduction of SK-block partials using atomic aggregation in L2 + + kMixed, // Deterministic reduction of SK-block partials employing either: + // (a) A separate wave of reduction thread blocks" (for scenarios with lots of + // SK-blocks per SK-tile) + // (b) Turnstile-ordered atomic aggregation in L2 (for scenarios with few + // SK-blocks per SK-tile) + }; + + static ReductionStrategy const kReductionStrategy = kMixed; + + + // + // Heuristics + // + + /// Data-parallel wave-quantization efficiency threshold (above which we go data-parallel) + static float constexpr kDpEfficiencyThreshold = 0.92f; + + /// Minimum number of MAC-iterations per streamk block + static int const kMinItersPerSkBlock = 2; + + /// Height in CTAs of a grid rasterization cohort + static int const kCohortCtasM = 8; + + /// Width in CTAs of a grid rasterization cohort + static int const kCohortCtasN = 4; + + /// Number of CTAs per cohort + static int const kCtasPerCohort = kCohortCtasN * kCohortCtasM; + + /// Cost-equivalent number of SM-iterations for fixup I/O + static int const kFixupStartupIterEquiv = 10; + static int const kFixupPeerIterEquiv = 3; + + + // + // Member state + // + + + /// The 3D value-extents of the GEMM computation volume (m,n,k) + GemmCoord problem_size; + + /// Div/mod accelerators + FastDivmod div_mod_tiled_shape_m; + FastDivmod div_mod_tiled_shape_n; + FastDivmod div_mod_tiled_cohort_shape_n; + FastDivmod div_mod_iters_per_tile; + + /// Whether to perform cohort CTA rasterization + bool cohort_raster; + + // Whether to pad and remap block indices + bool remap_block_indices; + + /// CTA occupancy per SM + int sm_occupancy; + + /// Number of SMs for dispatch heuristics to load-balance using Stream-K CTAs (wave size) + int avail_sms; + + int dp_blocks; /// Number of data-parallel thread blocks in the grid + int dp_first_wave_tiles; /// Number of output tiles each CTA in the first DP wave will produce + + /// Number of reduction blocks in the grid + int reduction_blocks; + + int sk_waves; + int sk_tiles; + int sk_big_blocks_per_region; + int sk_iters_per_region; + + /// Div/mod accelerators + FastDivmod div_mod_sk_iters_per_normal_block; + FastDivmod div_mod_sk_iters_per_big_block; + FastDivmod div_mod_sk_iters_per_region; + FastDivmod div_mod_sk_regions; //!! used in block map + FastDivmod div_mod_sk_blocks_per_region; //!! 
used in block map + + /// The batch count + int batch_count; + + + // + // Host+device interface + // + + /// Constructor + CUTLASS_HOST_DEVICE + ThreadblockSwizzleStreamK() {} + + /// Returns the GEMM volume in thread block tiles + CUTLASS_HOST_DEVICE + GemmCoord tiled_shape() const + { + return GemmCoord( + static_cast(div_mod_tiled_shape_m), + static_cast(div_mod_tiled_shape_n), + batch_count); + } + + /// Number of iterations per output tile + CUTLASS_HOST_DEVICE + int iters_per_tile() const + { + return static_cast(div_mod_iters_per_tile); + } + + /// Number of iterations for normal SK-blocks + CUTLASS_HOST_DEVICE + int sk_iters_per_normal_block() const + { + return static_cast(div_mod_sk_iters_per_normal_block); + } + + /// Number of SK regions + CUTLASS_HOST_DEVICE + int sk_regions() const + { + return static_cast(div_mod_sk_regions); + } + + /// Number of SK blocks per region (splitting factor) + CUTLASS_HOST_DEVICE + int sk_blocks_per_region() const + { + return static_cast(div_mod_sk_blocks_per_region); + } + + + // + // Host-side interface + // + + /// Debug print + void Print() + { +#ifndef __CUDA_ARCH__ + auto tiles = tiled_shape().mn().product(); + std::cout << + "problem_size: (" << problem_size.m() << "," << problem_size.n() << ")" << + ", tiled_shape: (" << tiled_shape().m() << "," << tiled_shape().n() << ")" << + ", tiles: " << tiles << + ", dp_tiles: " << tiles - sk_tiles << + ", sk_tiles: " << sk_tiles << + ", iters_per_tile: " << iters_per_tile() << + ", reduction_blocks: " << reduction_blocks << + ", dp_blocks: " << dp_blocks << + ", dp_waves: " << dp_blocks / avail_sms << + ", dp_first_wave_tiles: " << dp_first_wave_tiles << + ", sk_blocks_per_region: " << sk_blocks_per_region() << + ", sk_regions: " << sk_regions() << + ", sk_waves: " << sk_waves << + ", sk_iters_per_normal_block: " << sk_iters_per_normal_block() << + ", sk_big_blocks_per_region: " << sk_big_blocks_per_region << + ", remap_block_indices: " << remap_block_indices << + ", cohort_raster: " << cohort_raster << + ", sm_occupancy: " << sm_occupancy << + ", avail_sms: " << avail_sms << + ", num_blocks: " << get_num_blocks() << + "\n\n"; +#endif + } + + + // Compute sk_blocks to dispatch for a given number of sk_tiles + static void get_sk_blocks( + int &sk_blocks, /// [out] + int &savings_iters, /// [out] + int sk_tiles, + int iters_per_tile, + int avail_sms, + int max_sk_occupancy, + bool allow_partial_wave) + { + savings_iters = INT_MIN; + sk_blocks = 0; + + if (sk_tiles == 0) { + return; + } + + int sk_iters = sk_tiles * iters_per_tile; + + int dp_equiv_waves = (sk_tiles + avail_sms - 1) / avail_sms; + int dp_equiv_iters = iters_per_tile * dp_equiv_waves; + + int min_sk_blocks = (allow_partial_wave) ? 
fast_min(avail_sms, sk_tiles + 1) : avail_sms; + int max_sk_blocks = fast_min(avail_sms * max_sk_occupancy, sk_iters / kMinItersPerSkBlock); + + for (int trial_sk_blocks = min_sk_blocks; trial_sk_blocks <= max_sk_blocks; ++trial_sk_blocks) + { + int sk_waves = (trial_sk_blocks + avail_sms - 1) / avail_sms; + int max_sk_iters_per_block = (sk_iters + trial_sk_blocks - 1) / trial_sk_blocks; + int sk_iter_equiv = max_sk_iters_per_block * sk_waves; + + int num_peers = ((trial_sk_blocks + sk_tiles - 1) / sk_tiles) + 1; // add one for alignment skew + + float iter_cost = 0.02f * float(num_peers) * float(sk_iter_equiv); + + if (trial_sk_blocks % sk_tiles == 0) + { + // aligned + num_peers = (trial_sk_blocks / sk_tiles); + + iter_cost = 0.0f; + } + + float peer_cost = 2.0f * float(num_peers); + + float base_cost = 2.0f * float(sk_waves); + + int fixup_iter_equiv = int(base_cost + iter_cost + peer_cost); + + int trial_savings_iters = dp_equiv_iters - sk_iter_equiv - fixup_iter_equiv; + + if (trial_savings_iters >= savings_iters) { + savings_iters = trial_savings_iters; + sk_blocks = trial_sk_blocks; + } + } + } + + + /// Determine the populations of DP and SK blocks to invoke for the given number of output tiles + static void get_blocks( + int &dp_tiles, /// [out] + int &sk_blocks, /// [out] + int output_tiles, + int iters_per_tile, + int avail_sms, + int sm_occupancy) + { + int full_waves = output_tiles / avail_sms; + int full_wave_tiles = full_waves * avail_sms; + int partial_wave_tiles = output_tiles - full_wave_tiles; + + int score = -1; + dp_tiles = output_tiles; + sk_blocks = 0; + + if (partial_wave_tiles == 0) + { + // Perfect quantization + return; + } + + if (full_waves < sm_occupancy) + { + // We're less than full GPU occupancy + + // Form the SK wave from the partial wave to get us up to full GPU occupancy + int max_sk_occupancy = sm_occupancy - full_waves; + + dp_tiles = full_wave_tiles; + + get_sk_blocks( + sk_blocks, + score, + partial_wave_tiles, + iters_per_tile, + avail_sms, + max_sk_occupancy, + true); // we can run with less than a full wave of SK-blocks + + if (score < 0) { + // not profitable + sk_blocks = 0; + dp_tiles = output_tiles; + } + + return; + } + + // We're at (or greater) than GPU occupancy + + if ((sm_occupancy > 1 ) && (full_waves % sm_occupancy == sm_occupancy - 1)) + { + // If occupancy is more than one CTA per SM, form the SK wave from the partial + // wave to get us to full GPU occupancy + int max_sk_occupancy = 1; + + dp_tiles = full_wave_tiles; + + get_sk_blocks( + sk_blocks, + score, + partial_wave_tiles, + iters_per_tile, + avail_sms, + max_sk_occupancy, + true); // we can run with less than a full wave of SK-blocks + + if (score >= 0) { + return; + } + } + + // Form the SK wave by combining the last full wave and the partial wave + // We're less than full GPU occupancy + dp_tiles = full_wave_tiles - avail_sms; + + int max_sk_occupancy = sm_occupancy - ((full_waves - 1) % sm_occupancy); + + get_sk_blocks( + sk_blocks, + score, + partial_wave_tiles + avail_sms, + iters_per_tile, + avail_sms, + max_sk_occupancy, + false); // we cannot run with less than a full wave of SK-blocks + + if (score < 0) { + // not profitable + sk_blocks = 0; + dp_tiles = output_tiles; + } + + } + + /// Constructor: *Gemm* problem size (m, n, k) + template + ThreadblockSwizzleStreamK( + KernelTraits const kernel_traits_, + GemmUniversalMode const mode_, + GemmCoord const problem_size_, + GemmCoord const tile_size_, + int const batch_split_, /// Either (mode == 
GemmUniversalMode::kBatched) the batch count, or (mode == GemmUniversalMode::kGemm) the tile-splitting factor (1 defaults to StreamK, >1 emulates Split-K) + int const sm_occupancy_, + int const device_sms_, + int const avail_sms_) /// The number of SMs that StreamK dispatch heuristics will attempt to load-balance across (-1 defaults to device width, 1 implies classic data-parallel scheduling) + : + problem_size(problem_size_), + batch_count((mode_ == GemmUniversalMode::kBatched) ? batch_split_ : 1), + reduction_blocks(0), + dp_blocks(0), + dp_first_wave_tiles(1), // Default: one tile per DP-block in the first wave of DP blocks + sk_tiles(0), + sk_big_blocks_per_region(0), + sk_iters_per_region(0), + sk_waves(0), + sm_occupancy(sm_occupancy_), + remap_block_indices(false), + avail_sms(fast_max(1, avail_sms_)), + cohort_raster(false) + { + int gpu_occupancy = device_sms_ * sm_occupancy; + int iters_per_tile = (problem_size.k() + tile_size_.k() - 1) / tile_size_.k(); + int sk_iters_per_normal_block = 0; + + int sk_regions = 1; // Default: a single region of iteration space (across all SK tiles) + int sk_blocks_per_region = 0; + + GemmCoord tiled_shape( + (problem_size.m() + tile_size_.m() - 1) / tile_size_.m(), + (problem_size.n() + tile_size_.n() - 1) / tile_size_.n(), + batch_count); + + size_t problem_bytes = + (sizeof(typename GemmKernel::ElementC) * problem_size.m() * problem_size.n()) + + (sizeof(typename GemmKernel::ElementA) * problem_size.m() * problem_size.k()) + + (sizeof(typename GemmKernel::ElementB) * problem_size.k() * problem_size.n()); + + size_t problem_flops = size_t(problem_size.m()) * size_t(problem_size.n()) * size_t(problem_size.k()) * 2; + + float flops_per_byte = float(problem_flops) / float(problem_bytes); + + int output_tiles = tiled_shape.m() * tiled_shape.n(); + int waves = (output_tiles + avail_sms - 1) / avail_sms; + float dp_efficiency = float(output_tiles) / float(waves * avail_sms); + + // + // Determine dispatch composition of DP-tiles and SK-blocks + // + + // Start with a DP-only configuration + int dp_tiles = output_tiles; // Number of data-parallel tiles + int sk_blocks = 0; // Number of thread blocks to produce the remaining SK tiles + + // Only kGemm mode allows for SK load balancing + if (mode_ == GemmUniversalMode::kGemm) + { + int split_factor = batch_split_; + if (split_factor > 1) + { + // Split-K override + dp_tiles = 0; + sk_blocks = output_tiles * split_factor; + } + else if ((kReductionStrategy != kNone) && // Load-balancing strategy statically enabled + (avail_sms > 1)) // Plurality of SMs to load balance across + { + // Use heuristics + get_blocks( + dp_tiles, /// [out] + sk_blocks, /// [out] + output_tiles, + iters_per_tile, + avail_sms, + sm_occupancy); + } + } + + sk_tiles = output_tiles - dp_tiles; + + + // Compute SK block iteration details + if (sk_blocks > 0) + { + sk_waves = (sk_blocks + avail_sms - 1) / avail_sms; + + int sk_iters = sk_tiles * iters_per_tile; + sk_blocks = fast_min(sk_blocks, sk_iters); + + sk_iters_per_normal_block = sk_iters / sk_blocks; + int extra_sk_iters = sk_iters - (sk_iters_per_normal_block * sk_blocks); + int sk_big_blocks = extra_sk_iters; + + if ((sk_blocks > sk_tiles) && (sk_blocks % sk_tiles == 0)) + { + // Split-K decomposition + sk_regions = sk_tiles; + } + + sk_blocks_per_region = sk_blocks / sk_regions; + sk_big_blocks_per_region = sk_big_blocks / sk_regions; + sk_iters_per_region = sk_iters / sk_regions; + + // Use a separate reduction wave when all of: + // - Non-atomic reduction stratgy + // - 
The number of SK waves won't fully occupy the GPU (Otherwise we don't have + // a strong-scaling case for more parallel reduction) + // - More than three peers working on an SK tile. (This occurs when the ratio of + // SK-blocks to SK-tiles > 2, as a single tile may be covered by four SK-blocks, + // e.g.:[partial-block | block | block | partial-block] ). With three or + // less peers, the two non-finishing SK-blocks are not expexted to contend. + if ((kReductionStrategy == kMixed) && + (sk_waves < sm_occupancy) && + (sk_blocks > 2 * sk_tiles)) + { + // Launch a reduction block for every accumulator fragment in each SK-tile + static const int kAccumulatorFragments = GemmKernel::Epilogue::kAccumulatorFragments; + reduction_blocks = sk_tiles * kAccumulatorFragments; + + } + + // When we have a multi-occupancy kernel and at least two waves of active blocks (where + // at least one wave is SK blocks), we need to (1) dispatch at least four waves, and (2) + // remap the block indices so that we can reliably spread the SK blocks evenly across the + // device's first SM occupancy valence. Also see get_num_blocks() and get_block_idx(). + remap_block_indices = ( + (sm_occupancy > 1) && + (device_sms_ == avail_sms) && + (get_num_active_blocks() > avail_sms * 2)); + + // Initialize fast div/mod members related to SK + div_mod_sk_iters_per_normal_block = FastDivmod(sk_iters_per_normal_block); + div_mod_sk_iters_per_big_block = FastDivmod(sk_iters_per_normal_block + 1); + div_mod_sk_iters_per_region = FastDivmod(sk_iters_per_region); + div_mod_sk_regions = FastDivmod(sk_regions); + div_mod_sk_blocks_per_region = FastDivmod(sk_blocks_per_region); + } + + // + // Compute DP blocks + // + + dp_blocks = dp_tiles; + + cutlass::gemm::GemmCoord tiled_cohort_shape( + (tiled_shape.m() + kCohortCtasM - 1) / kCohortCtasM, + (tiled_shape.n() + kCohortCtasN - 1) / kCohortCtasN, + tiled_shape.k()); + int cohort_blocks = (tiled_cohort_shape.m() * tiled_cohort_shape.n()) * kCtasPerCohort; + float cohort_efficiency = float(dp_blocks) / float(cohort_blocks); + + // Check if the SK tiles would be in cohorts that are in-bounds + bool sk_in_range = true; + if (sk_tiles > 0) + { + int last_sk_tile = sk_tiles - 1; + int cohort_tile_idx = last_sk_tile / kCtasPerCohort; + int cohort_grid_m = cohort_tile_idx / tiled_cohort_shape.n(); + int cohort_grid_n = (cohort_grid_m > 0) ? 
+ tiled_cohort_shape.n() - 1 : + cohort_tile_idx % tiled_cohort_shape.n(); + + if ((((cohort_grid_m + 1) * kCohortCtasM) >= tiled_shape.m()) || + (((cohort_grid_n + 1) * kCohortCtasN) >= tiled_shape.n())) + { + sk_in_range = false; + } + + } + + // Decide if we're going to be doing cohort raster + if (sk_in_range && + (dp_blocks >= gpu_occupancy * 2) && + (cohort_efficiency > 0.85f)) + { + cohort_raster = true; + dp_blocks = cohort_blocks; + } + else if (sk_waves > 0) + { + // Update semi-persistence of first DP wave to ensure full grid wavesets + // (Only applies when there's an SK component and we're not doing blocked cohort rasterization) + int dp_tile_waves = (dp_tiles + avail_sms - 1) / avail_sms; + int full_dp_tile_waves = dp_tiles / avail_sms; + int waveset_excess = (sk_waves + dp_tile_waves) % sm_occupancy; + + if (dp_first_wave_tiles + waveset_excess <= full_dp_tile_waves) + { + dp_first_wave_tiles += waveset_excess; + dp_blocks -= (waveset_excess * avail_sms); + } + } + + // Setup fast-div/mod for device-side usage + div_mod_tiled_shape_m = FastDivmod(tiled_shape.m()); + div_mod_tiled_shape_n = FastDivmod(tiled_shape.n()); + div_mod_tiled_cohort_shape_n = FastDivmod(tiled_cohort_shape.n()); + div_mod_iters_per_tile = FastDivmod(iters_per_tile); + + } + + /// Number of blocks performing useful work + int get_num_active_blocks() const + { + return (sk_waves * avail_sms) + dp_blocks + reduction_blocks; + } + + /// Obtains number of threadblocks per GEMM + int get_num_blocks() const + { + int active_blocks = get_num_active_blocks(); + if (remap_block_indices) + { + // Add padding blocks if we are performing remapping in order to dispatch a grid of at least four waves + return fast_max(active_blocks, avail_sms * 4); + } + + return active_blocks; + } + + + /// Obtains grid extents in CTAs + dim3 get_grid_dims() const + { + return dim3(get_num_blocks(), 1, batch_count); + } + + + // + // Device-side interface + // + + /// Obtains number of threadblocks per GEMM + CUTLASS_DEVICE + int device_num_blocks() const + { + return gridDim.x; + } + + /// Obtains tile index for the given sk iteration + CUTLASS_DEVICE + int get_sk_tile_idx(int iter) const + { + int tile_idx = div_mod_iters_per_tile.div(iter); + return tile_idx; + } + + /// Obtains the batch index + CUTLASS_DEVICE + int get_batch_idx() const + { + return RematerializeBlockIdxZ(); + } + + /// Obtains the calling threadblock's tiled coordinates for the given tile index + CUTLASS_DEVICE + GemmCoord get_tile_offset(int tile_idx) const + { + int m, n; + + // row-major raster + div_mod_tiled_shape_n(m, n, tile_idx); + + if (tiled_shape().m() < tiled_shape().n()) + { + // column-major raster + div_mod_tiled_shape_m(n, m, tile_idx); + } + + if (cohort_raster) + { + // tiled cohort raster + int cohort_tile_idx = tile_idx / kCtasPerCohort; + int cohort_grid_m, cohort_grid_n; + div_mod_tiled_cohort_shape_n(cohort_grid_m, cohort_grid_n, cohort_tile_idx); + + int block_idx_cohort = tile_idx % kCtasPerCohort; + int block_cohort_m = block_idx_cohort / kCohortCtasN; + int block_cohort_n = block_idx_cohort % kCohortCtasN; + + m = (cohort_grid_m * kCohortCtasM) + block_cohort_m; + n = (cohort_grid_n * kCohortCtasN) + block_cohort_n; + } + + return GemmCoord(m, n, get_batch_idx()); + } + + /// Obtains the calling threadblock's tiled coordinates for the given tile index (row-major rasterization) + CUTLASS_DEVICE + GemmCoord get_tile_offset_row_major(int tile_idx) const + { + // row-major raster + int m, n; + div_mod_tiled_shape_n(m, n, tile_idx); + 
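+    // FastDivmod::operator()(quotient, remainder, dividend) produces both results of the
+    // division at once, so m = tile_idx / tiled_shape().n() (tile row) and
+    // n = tile_idx % tiled_shape().n() (tile column) for the row-major raster.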
return GemmCoord(m, n, get_batch_idx()); + } + + /// Obtains calling threadblock's linear threadblock index + CUTLASS_DEVICE + int get_block_idx() const + { + int block_idx = RematerializeBlockIdxX(); + + // Remap the block indices for the first two waves of thread blocks if + // we have multi-occupancy and the grid constitutes four or more waves + if (remap_block_indices && (block_idx < avail_sms * 2)) + { + int dest_sm = block_idx / 2; + int dest_wave = block_idx % 2; + int remapped_block_idx = dest_sm + (dest_wave * avail_sms); + block_idx = remapped_block_idx; + } + + // Remap block indices to interleave SK regions to limit intra-region waiting + if (block_idx < sk_regions() * sk_blocks_per_region()) + { + int block_in_region; + int region; + div_mod_sk_regions(block_in_region, region, block_idx); + block_idx = (region * sk_blocks_per_region()) + block_in_region; + } + + return block_idx; + } + + + /// Obtains calling linear threadblock index of the first block to work on the given tile + CUTLASS_DEVICE + int get_sk_block_idx(int iter) const + { + int region_idx; + int iter_in_region; + div_mod_sk_iters_per_region(region_idx, iter_in_region, iter); + + int big_block_iters = (sk_big_blocks_per_region * sk_iters_per_normal_block()) + sk_big_blocks_per_region; // number of iterations in the region's big blocks + int normal_block_iters = iter_in_region - big_block_iters; // number of iterations in the region's normal blocks + + int big_block_idx_in_region = div_mod_sk_iters_per_big_block.div(iter_in_region); + int normal_block_idx_in_region = sk_big_blocks_per_region + div_mod_sk_iters_per_normal_block.div(normal_block_iters); + + int block_idx_in_region = (big_block_idx_in_region < sk_big_blocks_per_region) ? + big_block_idx_in_region : + normal_block_idx_in_region; + + int owning_block_idx = (sk_blocks_per_region() * region_idx) + block_idx_in_region; + + return owning_block_idx; + } + + /// Obtains iteration extends for the given SK block index + CUTLASS_DEVICE + void get_iter_extents( + int sk_block_idx, + int &block_iter_begin, + int &block_iter_end) const + { + int region_idx; + int block_idx_in_region; + div_mod_sk_blocks_per_region(region_idx, block_idx_in_region, sk_block_idx); + + block_iter_begin = (region_idx * sk_iters_per_region) + (block_idx_in_region * sk_iters_per_normal_block()); + + // Adjust extents for the first "num_big_blocks" blocks that get one extra iteration + int block_iters = sk_iters_per_normal_block(); + if (block_idx_in_region < sk_big_blocks_per_region) { + // This is a +1 iteration block + block_iter_begin += block_idx_in_region; + block_iters++; + } else { + // This is a regular block + block_iter_begin += sk_big_blocks_per_region; + } + block_iter_end = block_iter_begin + block_iters; + } + + + /// Obtains calling linear threadblock index of the first block to work on the given tile + CUTLASS_DEVICE + int get_first_block_idx(int tile_idx, int block_idx) const + { + if (tile_idx >= sk_tiles) { + // DP tile + return block_idx; + } + + int iter = tile_idx * iters_per_tile(); + return get_sk_block_idx(iter); + } + +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/default_mma_complex_tensor_op.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/default_mma_complex_tensor_op.h new file mode 100644 
index 0000000000000000000000000000000000000000..1c794b1ec6e81baf0027278f1e6526e14b82a9dc --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/default_mma_complex_tensor_op.h @@ -0,0 +1,612 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Default warp-level GEMM operators selected by data type, size, and layouts of operands. 
+*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/warp/mma_complex_tensor_op.h" +#include "cutlass/gemm/warp/mma_complex_tensor_op_fast_f32.h" +#include "cutlass/gemm/warp/mma_gaussian_complex_tensor_op.h" +#include "cutlass/layout/tensor_op_multiplicand_sm80.h" + +namespace cutlass { +namespace gemm { +namespace warp { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Data type of A elements + typename ElementA_, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA_, + /// Data type of B elements + typename ElementB_, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB_, + /// Element type of C matrix + typename ElementC_, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC_, + /// Complex transform on A operand + ComplexTransform TransformA = ComplexTransform::kNone, + /// Complex transform on B operand + ComplexTransform TransformB = ComplexTransform::kNone, + /// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex) + typename Operator_ = arch::OpMultiplyAddComplex> +struct DefaultMmaComplexTensorOp; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for complex*complex case +// 4 real-valued mma operations +// A = (ar + j ai), B (br +j bi), D = AB +// D = dr + j di = (ar*br - ai*bi) + j (ar*bi + ai*br) +///////////////////////////////////////////////////////////////////////////////////////////////// +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Real-valued underlying type of complex-valued A operand + typename RealElementA, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA, + /// Real-valued underlying type of complex-valued B operand + typename RealElementB, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB, + /// Real-valued underlying type of complex-valued C operand + typename RealElementC, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC, + /// Complex transform on A operand + ComplexTransform TransformA, + /// Complex transform on B operand + ComplexTransform TransformB> +struct DefaultMmaComplexTensorOp< + WarpShape_, + InstructionShape_, + complex, + LayoutA, + complex, + LayoutB, + complex, + LayoutC, + TransformA, + TransformB, + arch::OpMultiplyAddComplex> { + + using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< + cutlass::arch::Mma< + InstructionShape_, + 32, + RealElementA, + cutlass::layout::RowMajor, + RealElementB, + cutlass::layout::ColumnMajor, + RealElementC, + cutlass::layout::RowMajor, + arch::OpMultiplyAdd>, + cutlass::MatrixShape<1, 1> + >; + + // Define the warp-level tensor op + using Type = cutlass::gemm::warp::MmaComplexTensorOp< + WarpShape_, + complex, + LayoutA, + complex, + LayoutB, + complex, + LayoutC, + Policy, + TransformA, + TransformB>; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for complex*complex case using GaussianComplex operation +// 3 real-valued mma operations +// A = (ar + j ai), B = (br +j bi), D = AB +// P1 = (ar 
+ ai) * br, P2 = - ar * (br - bi), P3 = ai * (br + bi) +// D = dr + j di = (P1 - P3) + j (P1 + P2) +///////////////////////////////////////////////////////////////////////////////////////////////// +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Real-valued underlying type of complex-valued A operand + typename RealElementA, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA, + /// Real-valued underlying type of complex-valued B operand + typename RealElementB, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB, + /// Real-valued underlying type of complex-valued C operand + typename RealElementC, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC, + /// Complex transform on A operand + ComplexTransform TransformA, + /// Complex transform on B operand + ComplexTransform TransformB> +struct DefaultMmaComplexTensorOp< + WarpShape_, + InstructionShape_, + complex, + LayoutA, + complex, + LayoutB, + complex, + LayoutC, + TransformA, + TransformB, + arch::OpMultiplyAddGaussianComplex> { + + using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< + cutlass::arch::Mma< + InstructionShape_, + 32, + RealElementA, + cutlass::layout::RowMajor, + RealElementB, + cutlass::layout::ColumnMajor, + RealElementC, + cutlass::layout::RowMajor, + arch::OpMultiplyAdd>, + cutlass::MatrixShape<1, 1> + >; + + // Define the warp-level tensor op + using Type = cutlass::gemm::warp::MmaGaussianComplexTensorOp< + WarpShape_, + complex, + LayoutA, + complex, + LayoutB, + complex, + LayoutC, + Policy, + TransformA, + TransformB>; +}; +///////////////////////////////////////////////////////////////////////////////////////////////// + +///////////////////////////////////////////////////////////////////////////////////////////////// +/// Partial specialization - input and output types are complex*complex +// Use TF32 tensor operation internally +// 4 real-valued mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32 operations on TF32 +// A = (ar + j ai), B (br +j bi), D = AB +// D = dr + j di = (ar*br - ai*bi) + j (ar*bi + ai*br) +///////////////////////////////////////////////////////////////////////////////////////////////// +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC, + /// Complex transform on A operand + ComplexTransform TransformA, + /// Complex transform on B operand + ComplexTransform TransformB> +struct DefaultMmaComplexTensorOp< + WarpShape_, + InstructionShape_, + complex, + LayoutA, + complex, + LayoutB, + complex, + LayoutC, + TransformA, + TransformB, + arch::OpMultiplyAddComplex> { + + // Complex floating point tensor operation use mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32 mma instruction + using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< + cutlass::arch::Mma< + InstructionShape_, + 32, + tfloat32_t, + cutlass::layout::RowMajor, + tfloat32_t, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + arch::OpMultiplyAdd>, + cutlass::MatrixShape<1, 1> + >; + + // Define the warp-level tensor op + using Type = cutlass::gemm::warp::MmaComplexTensorOp< + 
WarpShape_, + complex, + LayoutA, + complex, + LayoutB, + complex, + LayoutC, + Policy, + TransformA, + TransformB>; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// +/// Partial specialization - input and output types are complex*complex +// Use BF16 tensor operation internally +// 4 real-valued mma.sync.aligned.m16n8k8.f32.bf16.bf16.f32 operations on BF16 +// A = (ar + j ai), B (br +j bi), D = AB +// D = dr + j di = (ar*br - ai*bi) + j (ar*bi + ai*br) +///////////////////////////////////////////////////////////////////////////////////////////////// +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC, + /// Complex transform on A operand + ComplexTransform TransformA, + /// Complex transform on B operand + ComplexTransform TransformB> +struct DefaultMmaComplexTensorOp< + WarpShape_, + InstructionShape_, + complex, + LayoutA, + complex, + LayoutB, + complex, + LayoutC, + TransformA, + TransformB, + arch::OpMultiplyAddFastBF16> { + + // Complex floating point tensor operation use mma.sync.aligned.m16n8k8.f32.bf16.bf16.f32 mma instruction + using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< + cutlass::arch::Mma< + InstructionShape_, + 32, + bfloat16_t, + cutlass::layout::RowMajor, + bfloat16_t, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + arch::OpMultiplyAdd>, + cutlass::MatrixShape<1, 1> + >; + + // Define the warp-level tensor op + using Type = cutlass::gemm::warp::MmaComplexTensorOp< + WarpShape_, + complex, + LayoutA, + complex, + LayoutB, + complex, + LayoutC, + Policy, + TransformA, + TransformB>; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// +/// Partial specialization - input and output types are complex*complex +// Use F16 tensor operation internally +// 4 real-valued mma.sync.aligned.m16n8k8.f32.f16.f16.f32 operations on F16 +// A = (ar + j ai), B (br +j bi), D = AB +// D = dr + j di = (ar*br - ai*bi) + j (ar*bi + ai*br) +///////////////////////////////////////////////////////////////////////////////////////////////// +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC, + /// Complex transform on A operand + ComplexTransform TransformA, + /// Complex transform on B operand + ComplexTransform TransformB> +struct DefaultMmaComplexTensorOp< + WarpShape_, + InstructionShape_, + complex, + LayoutA, + complex, + LayoutB, + complex, + LayoutC, + TransformA, + TransformB, + arch::OpMultiplyAddFastF16> { + + // Complex floating point tensor operation use mma.sync.aligned.m16n8k8.f32.f16.f16.f32 mma instruction + using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< + cutlass::arch::Mma< + InstructionShape_, + 32, + half_t, + cutlass::layout::RowMajor, + half_t, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + arch::OpMultiplyAdd>, + 
cutlass::MatrixShape<1, 1> + >; + + // Define the warp-level tensor op + using Type = cutlass::gemm::warp::MmaComplexTensorOp< + WarpShape_, + complex, + LayoutA, + complex, + LayoutB, + complex, + LayoutC, + Policy, + TransformA, + TransformB>; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// +/// 3xTF32 or 4xTF32 (fast and accurate complex operation) +/// Partial specialization - input and output types are complex * complex +// Use 3xTF32 or 4xTF32 tensor operation internally +// 4 real-valued mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32 operations on TF32 +// A = (ar + j ai), B (br +j bi), D = AB +// D = dr + j di = 3x[(ar*br - ai*bi) + j (ar*bi + ai*br)] +///////////////////////////////////////////////////////////////////////////////////////////////// +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC, + /// Complex transform on A operand + ComplexTransform TransformA, + /// Complex transform on B operand + ComplexTransform TransformB> +struct DefaultMmaComplexTensorOp< + WarpShape_, + InstructionShape_, + complex, + LayoutA, + complex, + LayoutB, + complex, + LayoutC, + TransformA, + TransformB, + arch::OpMultiplyAddComplexFastF32> { + + // Complex floating point tensor operation use mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32 mma instruction + using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< + cutlass::arch::Mma< + InstructionShape_, + 32, + tfloat32_t, + cutlass::layout::RowMajor, + tfloat32_t, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::RowMajor, + arch::OpMultiplyAdd>, + cutlass::MatrixShape<1, 1> + >; + + // Define the warp-level tensor op + using Type = cutlass::gemm::warp::MmaComplexTensorOpFastF32< + WarpShape_, + complex, + LayoutA, + complex, + LayoutB, + complex, + LayoutC, + Policy, + TransformA, + TransformB>; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for complex*complex case +// 4 real-valued mma.sync.aligned.m16n8k4.f64.f64.f64.f64 operations +// A = (ar + j ai), B (br +j bi), D = AB +// D = dr + j di = (ar*br - ai*bi) + j (ar*bi + ai*br) +///////////////////////////////////////////////////////////////////////////////////////////////// +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename WarpShape_, + /// Real-valued underlying type of complex-valued A operand + typename RealElementA, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA, + /// Real-valued underlying type of complex-valued B operand + typename RealElementB, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB, + /// Real-valued underlying type of complex-valued C operand + typename RealElementC, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC, + /// Complex transform on A operand + ComplexTransform TransformA, + /// Complex transform on B operand + ComplexTransform TransformB> +struct DefaultMmaComplexTensorOp< + WarpShape_, + GemmShape<16, 8, 4>, + complex, + LayoutA, + complex, + LayoutB, + complex, + LayoutC, + 
TransformA, + TransformB, + arch::OpMultiplyAddComplex> { + + using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< + cutlass::arch::Mma< + GemmShape<16, 8, 4>, + 32, + RealElementA, + cutlass::layout::RowMajor, + RealElementB, + cutlass::layout::ColumnMajor, + RealElementC, + cutlass::layout::RowMajor, + arch::OpMultiplyAdd>, + cutlass::MatrixShape<1, 1> + >; + + // Define the warp-level tensor op + using Type = cutlass::gemm::warp::MmaComplexTensorOp< + WarpShape_, + complex, + LayoutA, + complex, + LayoutB, + complex, + LayoutC, + Policy, + TransformA, + TransformB, + true>; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// +/// Partial specialization for complex*complex case using GaussianComplex operation +// 3 real-valued mma.sync.aligned.m16n8k4.f64.f64.f64.f64 operations +// A = (ar + j ai), B = (br +j bi), D = AB +// P1 = (ar + ai) * br, P2 = - ar * (br - bi), P3 = ai * (br + bi) +// D = dr + j di = (P1 - P3) + j (P1 + P2) +///////////////////////////////////////////////////////////////////////////////////////////////// +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename WarpShape_, + /// Real-valued underlying type of complex-valued A operand + typename RealElementA, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA, + /// Real-valued underlying type of complex-valued B operand + typename RealElementB, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB, + /// Real-valued underlying type of complex-valued C operand + typename RealElementC, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC, + /// Complex transform on A operand + ComplexTransform TransformA, + /// Complex transform on B operand + ComplexTransform TransformB> +struct DefaultMmaComplexTensorOp< + WarpShape_, + GemmShape<16, 8, 4>, + complex, + LayoutA, + complex, + LayoutB, + complex, + LayoutC, + TransformA, + TransformB, + arch::OpMultiplyAddGaussianComplex> { + + using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< + cutlass::arch::Mma< + GemmShape<16, 8, 4>, + 32, + RealElementA, + cutlass::layout::RowMajor, + RealElementB, + cutlass::layout::ColumnMajor, + RealElementC, + cutlass::layout::RowMajor, + arch::OpMultiplyAdd>, + cutlass::MatrixShape<1, 1> + >; + + // Define the warp-level tensor op + using Type = cutlass::gemm::warp::MmaGaussianComplexTensorOp< + WarpShape_, + complex, + LayoutA, + complex, + LayoutB, + complex, + LayoutC, + Policy, + TransformA, + TransformB, + true>; +}; +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace warp +} // namespace gemm +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/default_mma_sparse_tensor_op.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/default_mma_sparse_tensor_op.h new file mode 100644 index 0000000000000000000000000000000000000000..89f8f1c7e392a35f62c5a6c567e6a2173edb1b01 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/default_mma_sparse_tensor_op.h @@ -0,0 +1,165 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
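As an illustrative aside (not part of the vendored CUTLASS header above): the two decompositions documented in default_mma_complex_tensor_op.h, the standard 4-multiply form and the Gaussian 3-multiply form, can be checked numerically with a few lines of host C++. This is a sketch for clarity only; the values and tolerances are arbitrary.

// Host-side check of the complex multiply decompositions described in the comments above.
// Plain C++, no CUTLASS dependency; compile with any C++ compiler and run.
#include <cassert>
#include <cmath>
#include <complex>
#include <cstdio>

int main() {
  float ar = 1.25f, ai = -0.5f, br = 0.75f, bi = 2.0f;

  // Standard form: 4 real multiplies.
  float dr4 = ar * br - ai * bi;
  float di4 = ar * bi + ai * br;

  // Gaussian form: 3 real multiplies.
  float p1 = (ar + ai) * br;
  float p2 = -ar * (br - bi);
  float p3 = ai * (br + bi);
  float dr3 = p1 - p3;   // real part
  float di3 = p1 + p2;   // imaginary part

  std::complex<float> d = std::complex<float>(ar, ai) * std::complex<float>(br, bi);
  assert(std::abs(d.real() - dr4) < 1e-6f && std::abs(d.imag() - di4) < 1e-6f);
  assert(std::abs(d.real() - dr3) < 1e-6f && std::abs(d.imag() - di3) < 1e-6f);
  std::printf("d = (%f, %f)\n", d.real(), d.imag());
  return 0;
}

Both forms agree with the direct complex product; the Gaussian variant trades one multiply for extra additions, which is why the library exposes it as a separate operator tag.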
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Default warp-level GEMM operators selected by data type, size, and layouts of operands. +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/warp/mma_sparse_tensor_op.h" + +namespace cutlass { +namespace gemm { +namespace warp { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Data type of A elements + typename ElementA_, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA_, + /// Data type of B elements + typename ElementB_, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB_, + /// Element type of C matrix + typename ElementC_, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC_, + /// Operator describing the tensor operation + typename Operator_ = arch::OpMultiplyAdd, + /// Number of partitions along K dimension + int PartitionsK = 1, + /// Store the accumulators in row major or column major. Row major is used + /// when output layout is interleaved. 
+ bool AccumulatorsInRowMajor = false +> +struct DefaultSparseMmaTensorOp; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial Specialization - inputs and output types are float - uses TF32 internally +template < + /// Shape of one matrix production operation (concept: GemmShape) + typename WarpShape_, + /// Shape of target matrix multiply instruction (concept: GemmShape) + typename InstructionShape_, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC, + /// Number of partitions along K dimension + int PartitionsK, + /// Store the accumulators in row major or column major. Row major is used + /// when output layout is interleaved. + bool AccumulatorsInRowMajor> +struct DefaultSparseMmaTensorOp< + WarpShape_, + InstructionShape_, + float, LayoutA, + float, LayoutB, + float, LayoutC, + arch::OpMultiplyAdd, PartitionsK, AccumulatorsInRowMajor> { + + // Uses TF32 internally + using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< + cutlass::arch::SparseMma< + InstructionShape_, + 32, + tfloat32_t, cutlass::layout::RowMajor, + tfloat32_t, cutlass::layout::ColumnMajor, + float, cutlass::layout::RowMajor, + arch::OpMultiplyAdd + >, + cutlass::MatrixShape<1, 1> >; + + // Define the warp-level tensor op + using Type = cutlass::gemm::warp::SparseMmaTensorOp< + WarpShape_, float, LayoutA, float, LayoutB, float, LayoutC, + Policy, PartitionsK, AccumulatorsInRowMajor>; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for m-by-n-by-kgroup +template < + /// Shape of one matrix production operation (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Data type of A elements + typename ElementA, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA, + /// Data type of B elements + typename ElementB, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB, + /// Element type of C matrix + typename ElementC, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC, + /// Operator describing the tensor operation + typename Operator_, + /// Number of partitions along K dimension + int PartitionsK, + /// Store the accumulators in row major or column major. Row major is used + /// when output layout is interleaved. 
+    bool AccumulatorsInRowMajor>
+struct DefaultSparseMmaTensorOp {
+  using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
+      cutlass::arch::SparseMma<
+        InstructionShape_,
+        32,
+        ElementA, cutlass::layout::RowMajor,
+        ElementB, cutlass::layout::ColumnMajor,
+        ElementC, cutlass::layout::RowMajor,
+        Operator_
+      >,
+      cutlass::MatrixShape<1, 1> >;
+
+  // Define the warp-level tensor op
+  using Type = cutlass::gemm::warp::SparseMmaTensorOp<
+      WarpShape_, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
+      Policy, PartitionsK, AccumulatorsInRowMajor>;
+};
+
+/////////////////////////////////////////////////////////////////////////////////////////////////
+
+} // namespace warp
+} // namespace gemm
+} // namespace cutlass
+
+/////////////////////////////////////////////////////////////////////////////////////////////////
diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/default_mma_tensor_op.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/default_mma_tensor_op.h
new file mode 100644
index 0000000000000000000000000000000000000000..3bb65a437c67b407d7be47b2155ee31f3fee9f7e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/default_mma_tensor_op.h
@@ -0,0 +1,123 @@
+/***************************************************************************************************
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ **************************************************************************************************/
+/*! \file
+    \brief Default warp-level GEMM operators selected by data type, size, and layouts of operands.
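As an illustrative aside (not part of the vendored headers): DefaultSparseMmaTensorOp above wires a warp tile onto arch::SparseMma, the Ampere sparse tensor-core instruction that consumes an A operand stored in 2:4 structured-sparsity form, at most two non-zeros per group of four values plus a 2-bit position index per kept value. The host-side sketch below only demonstrates that storage format; names are made up for the example, and for brevity it assumes exactly two non-zeros per group.

// Sketch of 2:4 structured-sparsity compression (host C++, no CUTLASS required).
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <vector>

struct Compressed24 {
  std::vector<float> values;   // 2 kept values per group of 4
  std::vector<uint8_t> meta;   // two 2-bit column indices packed per group
};

Compressed24 compress_2_4(const std::vector<float>& dense) {
  Compressed24 out;
  for (size_t g = 0; g < dense.size(); g += 4) {
    int idx[2] = {0, 0};
    int found = 0;
    for (int i = 0; i < 4 && found < 2; ++i)
      if (dense[g + i] != 0.0f) idx[found++] = i;
    assert(found == 2);  // example assumes exactly two non-zeros per group
    out.values.push_back(dense[g + idx[0]]);
    out.values.push_back(dense[g + idx[1]]);
    out.meta.push_back(static_cast<uint8_t>(idx[0] | (idx[1] << 2)));
  }
  return out;
}

int main() {
  std::vector<float> dense = {0.f, 3.f, 0.f, -1.f,  2.f, 0.f, 0.f, 5.f};
  Compressed24 c = compress_2_4(dense);
  for (size_t g = 0; g < c.meta.size(); ++g)
    std::printf("group %zu: values (%g, %g), indices (%d, %d)\n", g,
                c.values[2 * g], c.values[2 * g + 1],
                c.meta[g] & 3, (c.meta[g] >> 2) & 3);
  return 0;
}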
+*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/warp/mma_tensor_op.h" + +namespace cutlass { +namespace gemm { +namespace warp { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Data type of A elements + typename ElementA_, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA_, + /// Data type of B elements + typename ElementB_, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB_, + /// Element type of C matrix + typename ElementC_, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC_, + /// Operator describing the tensor operation + typename Operator_ = arch::OpMultiplyAdd, + /// Number of partitions along K dimension + int PartitionsK = 1, + /// Store the accumulators in row major or column major. Row major is used + /// when output layout is interleaved. + bool AccumulatorsInRowMajor = false> +struct DefaultMmaTensorOp; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for m-by-n-by-kgroup +template < + /// Shape of one matrix production operation (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Data type of A elements + typename ElementA, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA, + /// Data type of B elements + typename ElementB, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB, + /// Element type of C matrix + typename ElementC, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC, + /// Operator describing the tensor operation + typename Operator_, + /// Number of partitions along K dimension + int PartitionsK, + /// Store the accumulators in row major or column major. Row major is used + /// when output layout is interleaved. 
+    bool AccumulatorsInRowMajor>
+struct DefaultMmaTensorOp {
+  using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
+      cutlass::arch::Mma<
+        InstructionShape_,
+        32,
+        ElementA, cutlass::layout::RowMajor,
+        ElementB, cutlass::layout::ColumnMajor,
+        ElementC, cutlass::layout::RowMajor,
+        Operator_
+      >,
+      cutlass::MatrixShape<1, 1> >;
+
+  // Define the warp-level tensor op
+  using Type = cutlass::gemm::warp::MmaTensorOp<
+      WarpShape_, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
+      Policy, PartitionsK, AccumulatorsInRowMajor>;
+};
+
+/////////////////////////////////////////////////////////////////////////////////////////////////
+
+} // namespace warp
+} // namespace gemm
+} // namespace cutlass
+
+/////////////////////////////////////////////////////////////////////////////////////////////////
+
+#include "cutlass/gemm/warp/default_mma_tensor_op_sm80.h"
+
+/////////////////////////////////////////////////////////////////////////////////////////////////
diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/default_mma_tensor_op_sm80.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/default_mma_tensor_op_sm80.h
new file mode 100644
index 0000000000000000000000000000000000000000..d4d8026a44aff5ad17ab709482aab6ca14e88dd2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/default_mma_tensor_op_sm80.h
@@ -0,0 +1,238 @@
+/***************************************************************************************************
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ **************************************************************************************************/
+/*! \file
+    \brief Default warp-level GEMM operators selected by data type, size, and layouts of operands.
+*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/numeric_types.h" +#include "cutlass/arch/mma.h" +#include "cutlass/gemm/warp/mma_tensor_op.h" +#include "cutlass/gemm/warp/mma_tensor_op_fast_f32.h" +#include "cutlass/gemm/warp/default_mma_tensor_op.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace warp { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial Specialization - inputs and output types are float - uses BF16 internally +template < + /// Shape of one matrix production operation (concept: GemmShape) + typename WarpShape_, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC, + /// Number of partitions along K dimension + int PartitionsK, + /// Store the accumulators in row major or column major. Row major is used + /// when output layout is interleaved. + bool AccumulatorsInRowMajor> +struct DefaultMmaTensorOp< + WarpShape_, + GemmShape<16, 8, 8>, + float, LayoutA, + float, LayoutB, + float, LayoutC, + arch::OpMultiplyAddFastBF16, + PartitionsK, AccumulatorsInRowMajor> { + + // Uses BF16 internally + using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< + cutlass::arch::Mma< + GemmShape<16, 8, 8>, + 32, + bfloat16_t, cutlass::layout::RowMajor, + bfloat16_t, cutlass::layout::ColumnMajor, + float, cutlass::layout::RowMajor, + arch::OpMultiplyAdd + >, + cutlass::MatrixShape<1, 1> >; + + // Define the warp-level tensor op + using Type = cutlass::gemm::warp::MmaTensorOp< + WarpShape_, float, LayoutA, float, LayoutB, float, LayoutC, + Policy, PartitionsK, AccumulatorsInRowMajor>; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial Specialization - inputs and output types are float - uses F16 internally +template < + /// Shape of one matrix production operation (concept: GemmShape) + typename WarpShape_, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC, + /// Number of partitions along K dimension + int PartitionsK, + /// Store the accumulators in row major or column major. Row major is used + /// when output layout is interleaved. 
+ bool AccumulatorsInRowMajor> +struct DefaultMmaTensorOp< + WarpShape_, + GemmShape<16, 8, 8>, + float, LayoutA, + float, LayoutB, + float, LayoutC, + arch::OpMultiplyAddFastF16, + PartitionsK, AccumulatorsInRowMajor> { + + // Uses F16 internally + using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< + cutlass::arch::Mma< + GemmShape<16, 8, 8>, + 32, + half_t, cutlass::layout::RowMajor, + half_t, cutlass::layout::ColumnMajor, + float, cutlass::layout::RowMajor, + arch::OpMultiplyAdd + >, + cutlass::MatrixShape<1, 1> >; + + // Define the warp-level tensor op + using Type = cutlass::gemm::warp::MmaTensorOp< + WarpShape_, float, LayoutA, float, LayoutB, float, LayoutC, + Policy, PartitionsK, AccumulatorsInRowMajor>; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial Specialization - inputs and output types are float - uses TF32 internally +template < + /// Shape of one matrix production operation (concept: GemmShape) + typename WarpShape_, + /// Shape of target matrix multiply instruction (concept: GemmShape) + typename InstructionShape_, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC, + /// Number of partitions along K dimension + int PartitionsK, + /// Store the accumulators in row major or column major. Row major is used + /// when output layout is interleaved. + bool AccumulatorsInRowMajor> +struct DefaultMmaTensorOp< + WarpShape_, + InstructionShape_, + float, LayoutA, + float, LayoutB, + float, LayoutC, + arch::OpMultiplyAdd, PartitionsK, AccumulatorsInRowMajor> { + + // Uses TF32 internally + using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< + cutlass::arch::Mma< + InstructionShape_, + 32, + tfloat32_t, cutlass::layout::RowMajor, + tfloat32_t, cutlass::layout::ColumnMajor, + float, cutlass::layout::RowMajor, + arch::OpMultiplyAdd + >, + cutlass::MatrixShape<1, 1> >; + + // Define the warp-level tensor op + using Type = cutlass::gemm::warp::MmaTensorOp< + WarpShape_, float, LayoutA, float, LayoutB, float, LayoutC, + Policy, PartitionsK, AccumulatorsInRowMajor>; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial Specialization - inputs and output types are float - uses TF32 for Fast Accurate FP32 +template < + /// Shape of one matrix production operation (concept: GemmShape) + typename WarpShape_, + /// Shape of target matrix multiply instruction (concept: GemmShape) + typename InstructionShape_, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC, + /// Number of partitions along K dimension + int PartitionsK, + /// Store the accumulators in row major or column major. Row major is used + /// when output layout is interleaved. 
+ bool AccumulatorsInRowMajor> +struct DefaultMmaTensorOp< + WarpShape_, + InstructionShape_, + float, LayoutA, + float, LayoutB, + float, LayoutC, + arch::OpMultiplyAddFastF32, PartitionsK, AccumulatorsInRowMajor> { + + // Uses TF32 internally + using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< + cutlass::arch::Mma< + InstructionShape_, + 32, + cutlass::tfloat32_t, cutlass::layout::RowMajor, + cutlass::tfloat32_t, cutlass::layout::ColumnMajor, + float, cutlass::layout::RowMajor, + arch::OpMultiplyAdd + >, + cutlass::MatrixShape<1, 1> >; + + // Define the warp-level tensor op + using Type = cutlass::gemm::warp::MmaTensorOpFastF32< + WarpShape_, float, LayoutA, float, LayoutB, float, LayoutC, + Policy, PartitionsK, AccumulatorsInRowMajor>; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace warp +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#include "cutlass/gemm/warp/mma_complex_tensor_op_tile_iterator_sm80.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/default_mma_with_reduction_tensor_op.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/default_mma_with_reduction_tensor_op.h new file mode 100644 index 0000000000000000000000000000000000000000..63effe87e5d47b2067f4dcfc70e2eade8d84c61e --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/default_mma_with_reduction_tensor_op.h @@ -0,0 +1,92 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
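As an illustrative aside (not part of the vendored headers): the specializations above take float operands but compute either on a single TF32 product per element (arch::OpMultiplyAdd) or on several TF32 products that recover most of the lost precision (arch::OpMultiplyAddFastF32, handled by MmaTensorOpFastF32). The sketch below models that idea on the host, emulating TF32 by truncating the mantissa to 10 bits; the truncation, the variable names, and the choice of three cross terms are assumptions for illustration, not the exact hardware behavior.

// Host emulation of "1x TF32" versus a FastF32-style "big + small" split.
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <cmath>

float to_tf32(float x) {
  uint32_t bits;
  std::memcpy(&bits, &x, sizeof(bits));
  bits &= 0xFFFFE000u;              // keep sign, exponent, and top 10 mantissa bits
  std::memcpy(&x, &bits, sizeof(bits));
  return x;
}

int main() {
  float a = 1.0f + 1.1920929e-7f * 12345.0f;   // low-order mantissa bits populated
  float b = 3.14159265f;
  float exact = a * b;

  // One TF32 multiply: what the plain OpMultiplyAdd-on-float path does internally.
  float one_pass = to_tf32(a) * to_tf32(b);

  // FastF32-style: split each operand into a big TF32 piece and a TF32 residual,
  // then accumulate the large cross terms (small * small is dropped).
  float a_big = to_tf32(a), a_small = to_tf32(a - a_big);
  float b_big = to_tf32(b), b_small = to_tf32(b - b_big);
  float multi_pass = a_big * b_big + a_big * b_small + a_small * b_big;

  std::printf("exact     %.9g\n", exact);
  std::printf("1x TF32   %.9g  (err %.3g)\n", one_pass, std::fabs(one_pass - exact));
  std::printf("split TF32 %.9g (err %.3g)\n", multi_pass, std::fabs(multi_pass - exact));
  return 0;
}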
+ * + **************************************************************************************************/ +/*! \file + \brief Default warp-level GEMM operators selected by data type, size, and layouts of operands. +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/warp/mma_with_reduction_tensor_op.h" + +namespace cutlass { +namespace gemm { +namespace warp { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Data type of A elements + typename ElementA, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA, + /// Data type of B elements + typename ElementB, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB, + /// Element type of C matrix + typename ElementC, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC, + /// Operator describing the tensor operation + typename Operator_, + /// Reduce operand A or B along K dimension + bool ReduceKForA_, + /// Number of partitions along K dimension + int PartitionsK = 1, + /// Store the accumulators in row major or column major. Row major is used + /// when output layout is interleaved. + bool AccumulatorsInRowMajor = false> +struct DefaultMmaWithReductionTensorOp { + using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< + cutlass::arch::Mma, + cutlass::MatrixShape<1, 1> >; + + // Define the warp-level tensor op + using Type = cutlass::gemm::warp::MmaWithReductionTensorOp< + WarpShape_, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, + Policy, ReduceKForA_, PartitionsK, AccumulatorsInRowMajor>; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace warp +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/default_mma_wmma_tensor_op.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/default_mma_wmma_tensor_op.h new file mode 100644 index 0000000000000000000000000000000000000000..4f951d49bc3989b3d3f3012d28677ee07089c461 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/default_mma_wmma_tensor_op.h @@ -0,0 +1,130 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Default warp-level GEMM operators selected by data type, size, and layouts of operands. +*/ + +#pragma once + +#include "cutlass/arch/wmma.h" + +#if defined(CUTLASS_ARCH_WMMA_ENABLED) + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/warp/mma_tensor_op_wmma.h" + +namespace cutlass { +namespace gemm { +namespace warp { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + ///< Size of the Gemm problem (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Data type of A elements + typename ElementA_, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA_, + /// Data type of B elements + typename ElementB_, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB_, + /// Element type of C matrix + typename ElementC_, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC_, + /// Operator describing the tensor operation + typename Operator_ = arch::OpMultiplyAdd, + /// Number of partitions along K dimension + int PartitionsK = 1 +> +struct DefaultMmaTensorOpWmma; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for m-by-n-by-kgroup +template < + ///< Shape of one matrix production operation (concept: GemmShape) + typename WarpShape_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Data type of A elements + typename ElementA, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA, + /// Data type of B elements + typename ElementB, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB, + /// Element type of C matrix + typename ElementC, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC, + /// Operator describing the tensor operation + typename Operator_, + /// Number of partitions along K dimension + int PartitionsK> +struct DefaultMmaTensorOpWmma { + using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< + cutlass::arch::Wmma< + InstructionShape_, + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + Operator_>, + cutlass::MatrixShape<1, 1> >; + + // Define the warp-level tensor op + using Type = cutlass::gemm::warp::MmaTensorOpWmma< + WarpShape_, + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + Policy, + PartitionsK>; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace warp +} // namespace gemm +} // 
namespace cutlass + +#endif diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/layernorm_scale_bias_transform.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/layernorm_scale_bias_transform.h new file mode 100644 index 0000000000000000000000000000000000000000..ed903170bb355bd551dfee38c6f603f48d458282 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/layernorm_scale_bias_transform.h @@ -0,0 +1,139 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Templates implementing warp-level per channel scale+bias+relu before + matrix multiply-accumulate operations targeting Tensor Cores. 
+*/
+
+#pragma once
+
+#include "cutlass/cutlass.h"
+#include "cutlass/array.h"
+#include "cutlass/platform/platform.h"
+
+#include "cutlass/numeric_conversion.h"
+#include "cutlass/numeric_types.h"
+#include "cutlass/matrix_shape.h"
+
+#include "cutlass/arch/memory_sm75.h"
+#include "cutlass/arch/mma_sm75.h"
+#include "cutlass/arch/mma_sm80.h"
+
+#include "cutlass/gemm/gemm.h"
+#include "cutlass/gemm/warp/mma.h"
+
+#include "cutlass/gemm/warp/mma_tensor_op_policy.h"
+
+#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h"
+#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h"
+
+/////////////////////////////////////////////////////////////////////////////////////////////////
+
+namespace cutlass {
+namespace gemm {
+namespace warp {
+
+/////////////////////////////////////////////////////////////////////////////////////////////////
+
+template <typename FragmentActivations, typename FragmentVarMean,
+          typename FragmentGammaBeta>
+struct LayernormScaleBiasTransform {
+
+  using T = typename FragmentActivations::Element;
+
+  static int const NumActivations = FragmentActivations::kElements;
+  static int const NumVarMean = FragmentVarMean::kElements;
+  static int const NumGammaBeta = FragmentGammaBeta::kElements;
+  static int const MmaElements = 2;
+  // One element has one scale and one bias
+  static int const MmaScaleBiasPair = 2;
+  // 16816 has 2 columns and 2 rows
+  static int const MmaCols = 2;
+  static int const MmaRows = 2;
+
+  using MmaOperand = Array<T, MmaElements>;
+  using VarMeanOperand = Array<__half2, MmaScaleBiasPair>;
+  using GammaBetaOperand = Array<T, MmaElements * MmaScaleBiasPair>;
+
+  CUTLASS_DEVICE
+  void transform(MmaOperand &activations,
+                 VarMeanOperand const &var_mean,
+                 GammaBetaOperand const &gamma_beta) {
+
+#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800))
+    uint32_t *ptr_activations = reinterpret_cast<uint32_t *>(&activations);
+    uint32_t const *ptr_var_mean = reinterpret_cast<uint32_t const *>(&var_mean);
+    uint32_t const *ptr_gamma_beta = reinterpret_cast<uint32_t const *>(&gamma_beta);
+
+    // Apply per channel scale+bias+relu if the data is not a special NaN
+    // (0x7eff). If it is a special NaN (0x7eff), hard code the output to 0.
+
+    // We assumes the pair of FP16 are either both inbound or both out-of-bound.
+    // It requires C to be an even number.
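As an illustrative aside (not part of the vendored file): the two packed fma.rn.f16x2 instructions in the asm block that follows evaluate a scale-then-shift twice, d = var_mean[0] * x + var_mean[1] followed by d = gamma_beta[0] * d + gamma_beta[1]. One natural encoding for layernorm is var_mean = (1/std, -mean/std), which makes the pair of FMAs equal to gamma * (x - mean) / std + beta; that encoding and the names below are assumptions for illustration only.

// Scalar model of the two-FMA layernorm scale+bias applied by the asm below.
#include <cmath>
#include <cstdio>

int main() {
  float x = 0.8f, mean = 0.25f, var = 0.09f, eps = 1e-5f;
  float gamma = 1.5f, beta = -0.2f;

  // Reference layernorm for one element.
  float ref = gamma * (x - mean) / std::sqrt(var + eps) + beta;

  // Factored form matching the two fma.rn.f16x2 instructions:
  //   d = s0 * x + s1;  d = gamma * d + beta
  float s0 = 1.0f / std::sqrt(var + eps);   // assumed var_mean[0] analogue
  float s1 = -mean * s0;                    // assumed var_mean[1] analogue
  float d = s0 * x + s1;
  d = gamma * d + beta;

  std::printf("reference %.6f, two-fma form %.6f\n", ref, d);
  return 0;
}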
+ asm volatile( + "{\n\t" + " fma.rn.f16x2 %0, %1, %2, %3;\n" + " fma.rn.f16x2 %0, %4, %0, %5;\n" + "}\n" + : "=r"(ptr_activations[0]) + : "r"(ptr_var_mean[0]), "r"(ptr_activations[0]), + "r"(ptr_var_mean[1]), + "r"(ptr_gamma_beta[0]), "r"(ptr_gamma_beta[1])); +#else + assert(0); +#endif + } + + CUTLASS_DEVICE + void operator()(FragmentActivations &activations, + FragmentVarMean const &var_mean, + FragmentGammaBeta const &gamma_beta) { + MmaOperand *ptr_activations = reinterpret_cast(&activations); + VarMeanOperand const *ptr_var_mean = + reinterpret_cast(&var_mean); + GammaBetaOperand const *ptr_gamma_beta = + reinterpret_cast(&gamma_beta); + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < (NumActivations / MmaElements); ++i) { + transform(ptr_activations[i], + ptr_var_mean[i / (MmaCols * MmaRows) * MmaRows + i % MmaRows], + ptr_gamma_beta[(i / MmaScaleBiasPair) % MmaCols]); + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace warp +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma.h new file mode 100644 index 0000000000000000000000000000000000000000..1f3ca94ecb9899f7bd08aea60924a18102400171 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma.h @@ -0,0 +1,60 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Templates exposing architecture support for warp-level multiply-add operations +*/ + +#pragma once + +#include "cutlass/cutlass.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace warp { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Query the number of threads per warp +template +struct WarpSize { + static int const value = 32; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace warp +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_complex_tensor_op.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_complex_tensor_op.h new file mode 100644 index 0000000000000000000000000000000000000000..f828355c09438f7daa7329f0f9315909b4f0fbf5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_complex_tensor_op.h @@ -0,0 +1,1168 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Templates implementing warp-level matrix multiply-accumulate operations targeting + Tensor Cores. 
+*/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/array.h" +#include "cutlass/complex.h" +#include "cutlass/numeric_types.h" +#include "cutlass/matrix_shape.h" +#include "cutlass/functional.h" + +#include "cutlass/arch/memory_sm75.h" +#include "cutlass/arch/mma_sm75.h" +#include "cutlass/arch/mma_sm80.h" +#include "cutlass/arch/mma_sm90.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/warp/mma.h" + +#include "cutlass/gemm/warp/mma_tensor_op_policy.h" +#include "cutlass/gemm/warp/mma_tensor_op.h" + +#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h" +#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h" +#include "cutlass/gemm/warp/mma_complex_tensor_op_tile_iterator_sm80.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace warp { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace detail { + +template < + /// Data type of real & imag members of complex numbers in the SourceFragment + typename RealElement, + /// Destination fragment required by the mma operation + typename DestinationFragment, + /// Source fragment holding complex elements + typename SourceFragment, + /// Number of mma operations performed + typename MmaIterations, + /// Shape of operand elements + typename MmaOperandShape, + /// Complex transform on A operand + ComplexTransform Transform_, + /// Operand A or Operand B + Operand Operand_, + /// Floating-point rounding style + FloatRoundStyle Round_> +struct UnpackComplexConvertAndPackForMma; + +// Partial specialization for OperandA and Congruous smem layout +template < + typename RealElement, + typename DestinationFragment, + typename SourceFragment, + typename MmaIterations, + typename MmaOperandShape, + ComplexTransform Transform_, + FloatRoundStyle Round_> +struct UnpackComplexConvertAndPackForMma < + RealElement, + DestinationFragment, + SourceFragment, + MmaIterations, + MmaOperandShape, + Transform_, + Operand::kA, + Round_> { + + // + // Type definitions + // + static Operand const kOperand = Operand::kA; + static ComplexTransform const kTransform = Transform_; + static FloatRoundStyle const kRound = Round_; + + // Data type of elements in the destination fragment + using MmaElement = typename DestinationFragment::Element; + + // Numeric convertor MmaElement <= RealElement + using Converter = NumericConverter; + + // Operand layout parameters + using SourceFragmentLayout = layout::ColumnMajor; + static int const kLdm = MmaIterations::kRow * MmaOperandShape::kRow; + + /// Ctor + CUTLASS_DEVICE + UnpackComplexConvertAndPackForMma() {} + + CUTLASS_DEVICE + void operator()(DestinationFragment *dest, SourceFragment const &source) { + + Converter convert_op; + SourceFragmentLayout layout(kLdm); + + CUTLASS_PRAGMA_UNROLL + for(int i=0; i and apply rounding on real and imag parts + MmaElement a = convert_op(source[layout(MatrixCoord{row,col})].real()); + MmaElement b = convert_op(source[layout(MatrixCoord{row,col})].imag()); + + // Unpack rounded complex and pack into DestinationFragment for mma operation + dest[i][pos] = a; + dest[i+MmaIterations::kRow][pos++] = (kTransform == ComplexTransform::kConjugate ? 
-b : b); + + } + } + } + } +}; + +// Partial specialization for OperandB and Congruous smem layout +template < + typename RealElement, + typename DestinationFragment, + typename SourceFragment, + typename MmaIterations, + typename MmaOperandShape, + ComplexTransform Transform_, + FloatRoundStyle Round_> +struct UnpackComplexConvertAndPackForMma < + RealElement, + DestinationFragment, + SourceFragment, + MmaIterations, + MmaOperandShape, + Transform_, + Operand::kB, + Round_> { + + // + // Type definitions + // + static Operand const kOperand = Operand::kB; + static ComplexTransform const kTransform = Transform_; + static FloatRoundStyle const kRound = Round_; + + // Data type of elements in the destination fragment + using MmaElement = typename DestinationFragment::Element; + + // Numeric convertor MmaElement <= RealElement + using Converter = NumericConverter; + + // Operand layout parameters + using SourceFragmentLayout = layout::RowMajor; + static int const kLdm = MmaIterations::kColumn * MmaOperandShape::kColumn; + + /// Ctor + CUTLASS_DEVICE + UnpackComplexConvertAndPackForMma() {} + + CUTLASS_HOST_DEVICE + void operator()(DestinationFragment *dest, SourceFragment const &source) { + + Converter convert_op; + SourceFragmentLayout layout(kLdm); + + CUTLASS_PRAGMA_UNROLL + for(int i=0; i apply rounding on real and imag parts + MmaElement a = convert_op(source[layout(MatrixCoord{row,col})].real()); + MmaElement b = convert_op(source[layout(MatrixCoord{row,col})].imag()); + + // Unpack rounded complex and pack into DestinationFragment for mma operation + dest[i][pos] = a; + dest[i+MmaIterations::kColumn][pos++] = (kTransform == ComplexTransform::kConjugate ? -b : b); + } + } + } + } +}; +} // namespace detail + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + /// Data type of A elements + typename RealElementA, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA_, + /// Data type of B elements + typename RealElementB, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB_, + /// Element type of C matrix + typename RealElementC, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC_, + /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) + typename Policy_, + /// Complex transform on A operand + ComplexTransform TransformA = ComplexTransform::kNone, + /// Complex transform on B operand + ComplexTransform TransformB = ComplexTransform::kNone, + /// Do source operands need more than one elements + bool GeneralizedOperatorElements = false, + /// Used for partial specialization + typename Enable = bool +> +class MmaComplexTensorOp; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for complex*complex+complex => complex using real-valued TensorOps +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + /// Data type of A elements + typename RealElementA, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA_, + /// Data type of B elements + typename RealElementB, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB_, + /// Element type of C matrix + typename RealElementC, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC_, + /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) + typename Policy_, + /// 
Complex transform on A operand + ComplexTransform TransformA, + /// Complex transform on B operand + ComplexTransform TransformB +> +class MmaComplexTensorOp< + Shape_, + complex, + LayoutA_, + complex, + LayoutB_, + complex, + LayoutC_, + Policy_, + TransformA, + TransformB> { +public: + /// Shape of warp-level matrix operation (concept: GemmShape) + using Shape = Shape_; + + /// Data type of multiplicand A + using ElementA = complex; + + /// Layout of multiplicand A + using LayoutA = LayoutA_; + + /// Data type of multiplicand B + using ElementB = complex; + + /// Layout of multiplicand B + using LayoutB = LayoutB_; + + /// Data type of accumulator matrix C + using ElementC = complex; + + /// Layout of accumulator matrix C + using LayoutC = LayoutC_; + + /// Shape of the warp in units of thread (concept: MmaLanePolicyTensorOp) + using Policy = Policy_; + + /// Underlying matrix multiply operator (concept: arch::Mma) + using ArchMmaOperator = typename Policy::Operator; + + /// Architecture tag from underlying instruction + using ArchTag = typename ArchMmaOperator::ArchTag; + + /// Indicates class of matrix operator + using OperatorClass = arch::OpClassTensorOp; + + /// Shape of underlying instruction + using InstructionShape = typename ArchMmaOperator::Shape; + + /// Indicates math operator + using MathOperator = arch::OpMultiplyAddComplex; + + /// Complex transform on A operand + static ComplexTransform const kTransformA = TransformA; + + /// Complex transform on B operand + static ComplexTransform const kTransformB = TransformB; + + /// Number of threads participating in warp-level matrix product + static int const kThreadCount = 32; + +public: + + /// Iterates over the A operand in memory + using IteratorA = MmaTensorOpMultiplicandTileIterator< + MatrixShape, + Operand::kA, + ElementA, + LayoutA, + MatrixShape, + Policy::OpDelta::kRow, + 32, + 1 + >; + + /// Storage for A tile + using FragmentA = typename IteratorA::Fragment; + + /// Storage for transformed A tile + using TransformedFragmentA = FragmentA; + + /// Iterates over the B operand in memory + using IteratorB = MmaTensorOpMultiplicandTileIterator< + MatrixShape, + Operand::kB, + ElementB, + LayoutB, + MatrixShape, + Policy::OpDelta::kColumn, + 32, + 1 + >; + + /// Storage for B tile + using FragmentB = typename IteratorB::Fragment; + + /// Storage for transformed B tile + using TransformedFragmentB = FragmentB; + + static_assert( + !(Shape::kM % ArchMmaOperator::Shape::kM) && + !(Shape::kN % ArchMmaOperator::Shape::kN), + "Shape of warp-level Mma must be divisible by operator shape."); + + /// Number of mma operations performed + using MmaIterations = MatrixShape< + Shape::kM / ArchMmaOperator::Shape::kM, + Shape::kN / ArchMmaOperator::Shape::kN + >; + + /// Iterates over the C operand in memory + using IteratorC = MmaTensorOpAccumulatorTileIterator< + MatrixShape, + ElementC, + LayoutC, + typename ArchMmaOperator::Shape, + typename Policy::OpDelta>; + + /// Storage for C tile, the accumulator. Note, regardless of multiplicand type, this + /// storage arrangement is to be considered 'planar complex' in the sense that all real-valued + /// parts are stored consecutively followed by all imaginary parts. This matches the structure + /// of Tensor Cores which are always real-valued matrix multiplies. 
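A minimal host-side sketch of the planar-complex accumulator indexing described above, with assumed example iteration counts (kRow, kColumn, and the helper functions are illustrative, not CUTLASS code): the real-part tile of the (m, n)-th MMA lives at index m + n * kRow, and the matching imaginary-part tile is offset by the total tile count, which is how operator() below selects its MmaOperandC accumulators.

#include <array>
#include <cstdio>

// Example iteration counts standing in for MmaIterations::kRow / kColumn.
constexpr int kRow = 2;
constexpr int kColumn = 2;
constexpr int kCount = kRow * kColumn;

// Index of the real-part accumulator tile for the (m, n)-th MMA.
constexpr int real_index(int m, int n) { return m + n * kRow; }

// Imaginary-part tiles follow all real-part tiles ('planar complex').
constexpr int imag_index(int m, int n) { return real_index(m, n) + kCount; }

int main() {
  std::array<float, 2 * kCount> accum{};  // reals first, then imaginaries
  accum[real_index(1, 0)] += 1.0f;        // touches a real-part tile
  accum[imag_index(1, 0)] += 2.0f;        // touches the matching imaginary tile
  std::printf("real slot %d, imag slot %d\n", real_index(1, 0), imag_index(1, 0));
  return 0;
}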
+ using FragmentC = typename IteratorC::Fragment; + + static_assert( + FragmentC::kElements == 2 * MmaIterations::kCount * ArchMmaOperator::FragmentC::kElements, + "Unexpected planar complex fragment length."); + +private: + + // + // Data members + // + + /// Underlying real-valued matrix multiply operator (concept: arch::Mma) + ArchMmaOperator mma; + +public: + + // + // Methods + // + + /// Ctor + CUTLASS_DEVICE + MmaComplexTensorOp() {} + + /// Performs a warp-level matrix multiply-accumulate operation + CUTLASS_DEVICE + void operator()( + FragmentC &D, + FragmentA const &A, + FragmentB const &B, + FragmentC const &C + ) const { + + // Alias types for underlying real-valued matrix multiply operator + using MmaOperandA = typename ArchMmaOperator::FragmentA; + using MmaOperandB = typename ArchMmaOperator::FragmentB; + using MmaOperandC = typename ArchMmaOperator::FragmentC; + + static_assert(MmaOperandA::kElements == 1, + "This implementation only supports math instructions in which exactly one element is needed for the A operand." + "We can geneneralize later."); + + static_assert(MmaOperandB::kElements == 1, + "This implementation only supports math instructions in which exactly one element is needed for the B operand." + "We can geneneralize later."); + + D = C; + + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < MmaIterations::kRow; ++m) { + + // mma(accum.real(), a.real(), b.real(), accum.real()); + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < MmaIterations::kColumn; ++n) { + + // Pack operands together. This may result in actual MOVs + MmaOperandA operand_A; + MmaOperandB operand_B; + + operand_A[0] = A[m].real(); + operand_B[0] = B[n].real(); + + // Real-valued accumulator part + MmaOperandC *accum = reinterpret_cast(&D) + + (m + n * MmaIterations::kRow); + + mma(*accum, operand_A, operand_B, *accum); + } + + // mma(accum.imag(), a.real(), b.imag(), accum.imag()); + CUTLASS_PRAGMA_UNROLL + for (int n = MmaIterations::kColumn - 1; n >= 0; --n) { + + // Pack operands together. This may result in actual MOVs + MmaOperandA operand_A; + MmaOperandB operand_B; + + operand_A[0] = A[m].real(); + operand_B[0] = (kTransformB == ComplexTransform::kConjugate ? -B[n].imag() : B[n].imag()); + + // Complex-valued accumulator part + MmaOperandC *accum = reinterpret_cast(&D) + + (m + n * MmaIterations::kRow) + MmaIterations::kCount; + + mma(*accum, operand_A, operand_B, *accum); + } + + // mma(accum.real(), -a.imag(), b.imag(), accum.real()) + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < MmaIterations::kColumn; ++n) { + + // Pack operands together. This may result in actual MOVs + MmaOperandA operand_A; + MmaOperandB operand_B; + + // A imaginary part is intentionally negated + operand_A[0] = (kTransformA == ComplexTransform::kConjugate ? A[m].imag() : -A[m].imag()); + operand_B[0] = (kTransformB == ComplexTransform::kConjugate ? -B[n].imag() : B[n].imag()); + + // Real-valued accumulator part + MmaOperandC *accum = reinterpret_cast(&D) + + (m + n * MmaIterations::kRow); + + mma(*accum, operand_A, operand_B, *accum); + } + + CUTLASS_PRAGMA_UNROLL + for (int n = MmaIterations::kColumn - 1; n >= 0; --n) { + + // Pack operands together. This may result in actual MOVs + MmaOperandA operand_A; + MmaOperandB operand_B; + + operand_A[0] = (kTransformA == ComplexTransform::kConjugate ? 
-A[m].imag() : A[m].imag()); + operand_B[0] = B[n].real(); + + // Complex-valued accumulator part + MmaOperandC *accum = reinterpret_cast(&D) + + (m + n * MmaIterations::kRow) + MmaIterations::kCount; + + mma(*accum, operand_A, operand_B, *accum); + } + } + } + + /// Transform the mma operands to the required types + CUTLASS_DEVICE + void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B, + FragmentA const &A, FragmentB const &B) const { + dst_A = A; + dst_B = B; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for complex*complex+complex => complex: +// Operands data type: complex +// Rounding: float -> tfloat32_t (round half_ulp_truncate nearest) +// Math instruction: mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32 +// Output data type: complex +// +///////////////////////////////////////////////////////////////////////////////////////////////// +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA_, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB_, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC_, + /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) + typename Policy_, + /// Complex transform on A operand + ComplexTransform TransformA, + /// Complex transform on B operand + ComplexTransform TransformB +> +class MmaComplexTensorOp< + Shape_, + complex, + LayoutA_, + complex, + LayoutB_, + complex, + LayoutC_, + Policy_, + TransformA, + TransformB> { +public: + /// Shape of warp-level matrix operation (concept: GemmShape) + using Shape = Shape_; + + /// Data type of members of complex multiplicand A + using RealElementA = float; + + /// Data type of multiplicand A + using ElementA = complex; + + /// Layout of multiplicand A + using LayoutA = LayoutA_; + + /// Data type of members of complex multiplicand B + using RealElementB = float; + + /// Data type of multiplicand B + using ElementB = complex; + + /// Layout of multiplicand B + using LayoutB = LayoutB_; + + /// Data type of members of complex accumulator matrix C + using RealElementC = float; + + /// Data type of accumulator matrix C + using ElementC = complex; + + /// Layout of accumulator matrix C + using LayoutC = LayoutC_; + + /// Shape of the warp in units of thread (concept: MmaLanePolicySimt) + using Policy = Policy_; + + /// Underlying matrix multiply operator (concept: arch::Mma) + using ArchMmaOperator = typename Policy::Operator; + + /// Shape of underlying instruction + using InstructionShape = typename ArchMmaOperator::Shape; + + /// Underlying arch tag + using ArchTag = typename ArchMmaOperator::ArchTag; + + /// Indicates class of matrix operator + using OperatorClass = arch::OpClassTensorOp; + + /// Indicates math operator + using MathOperator = typename arch::OpMultiplyAddComplex; + + /// Complex transform on A operand + static ComplexTransform const kTransformA = TransformA; + + /// Complex transform on B operand + static ComplexTransform const kTransformB = TransformB; + + /// Number of threads participating in warp-level matrix product + static int const kThreadCount = 32; + +public: + + /// Iterates over the A operand in memory + using IteratorA = MmaTensorOpMultiplicandTileIterator< + MatrixShape, + Operand::kA, + ElementA, + LayoutA, + MatrixShape, + Policy::OpDelta::kRow, + 32, + 1 + >; + + /// Storage for A tile + using FragmentA = typename 
IteratorA::Fragment; + + /// Storage for transformed A tile + using TransformedFragmentA = + Array; + + /// Iterates over the B operand in memory + using IteratorB = MmaTensorOpMultiplicandTileIterator< + MatrixShape, + Operand::kB, + ElementB, + LayoutB, + MatrixShape, + Policy::OpDelta::kColumn, + 32, + 1 + >; + + /// Storage for B tile + using FragmentB = typename IteratorB::Fragment; + + /// Storage for transformed B tile + using TransformedFragmentB = + Array; + + static_assert( + !(Shape::kM % ArchMmaOperator::Shape::kM) && + !(Shape::kN % ArchMmaOperator::Shape::kN), + "Shape of warp-level Mma must be divisible by operator shape."); + + /// Number of complex products operations performed (one complex product needs four mma instructions) + using MmaIterations = MatrixShape< + Shape::kM / ArchMmaOperator::Shape::kM, + Shape::kN / ArchMmaOperator::Shape::kN + >; + + /// Iterates over the C operand in memory + using IteratorC = MmaTensorOpAccumulatorTileIterator< + MatrixShape, + ElementC, + LayoutC, + typename ArchMmaOperator::Shape, + typename Policy::OpDelta>; + + /// Storage for C tile, the accumulator. Note, regardless of multiplicand type, this + /// storage arrangement is to be considered 'planar complex' in the sense that all real-valued + /// parts are stored consecutively followed by all imaginary parts. This matches the structure + /// of Tensor Cores which are always real-valued matrix multiplies. + using FragmentC = typename IteratorC::Fragment; + +private: + + // + // Data members + // + + /// Underlying real-valued matrix multiply operator (concept: arch::Mma) + ArchMmaOperator mma; + +public: + + // + // Methods + // + + /// Ctor + CUTLASS_DEVICE + MmaComplexTensorOp() {} + + /// Performs a warp-level matrix multiply-accumulate operation + CUTLASS_DEVICE + void operator()( + FragmentC &D, + TransformedFragmentA const &A, + TransformedFragmentB const &B, + FragmentC const &C + ) const { + + // Alias types for underlying real-valued matrix multiply operator + using InstMmaOperandA = typename ArchMmaOperator::FragmentA; + using InstMmaOperandB = typename ArchMmaOperator::FragmentB; + using MmaOperandC = typename ArchMmaOperator::FragmentC; + + static_assert(platform::is_same, typename ArchMmaOperator::Shape>::value, + "This implementation only supports mma.m16n8k8 math instructions."); + + static_assert(InstMmaOperandA::kElements == 4, + "This implementation only supports math instructions in which exactly four element is needed for the A operand." + "We can geneneralize later."); + + static_assert(InstMmaOperandB::kElements == 2, + "This implementation only supports math instructions in which exactly two element is needed for the B operand." 
+ "We can geneneralize later."); + + // Instruction Operands A & B holding real part followed by imaginary part for mma operations + InstMmaOperandA const *operand_A = reinterpret_cast(&A); + InstMmaOperandB const *operand_B = reinterpret_cast(&B); + + // + // Accumulate in place + // + D = C; + + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < MmaIterations::kRow; ++m) { + + // mma(accum.real(), a.real(), b.real(), accum.real()); + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < MmaIterations::kColumn; ++n) { + + // Real-valued accumulator part + MmaOperandC *accum = reinterpret_cast(&D) + + (m + n * MmaIterations::kRow); + + mma(*accum, operand_A[m], operand_B[n], *accum); + } + + // mma(accum.imag(), a.real(), b.imag(), accum.imag()); + CUTLASS_PRAGMA_UNROLL + for (int n = MmaIterations::kColumn - 1; n >= 0; --n) { + + // Complex-valued accumulator part + MmaOperandC *accum = reinterpret_cast(&D) + + (m + n * MmaIterations::kRow) + MmaIterations::kCount; + + mma(*accum, operand_A[m], operand_B[n+MmaIterations::kColumn], *accum); + } + + // mma(accum.real(), a.imag(), -b.imag(), accum.real()) + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < MmaIterations::kColumn; ++n) { + + // negate OperandB to accumulate -(a.imag()*b.imag()) + // negating OperandB emits less instrucitons than negating OperandA as OperandB has less elements + negate negate_op; + + // Real-valued accumulator part + MmaOperandC *accum = reinterpret_cast(&D) + + (m + n * MmaIterations::kRow); + + mma(*accum, operand_A[m+MmaIterations::kRow], negate_op(operand_B[n+MmaIterations::kColumn]), *accum); + } + + // mma(accum.imag(), a.imag(), b.real(), accum.imag()) + CUTLASS_PRAGMA_UNROLL + for (int n = MmaIterations::kColumn - 1; n >= 0; --n) { + + // Complex-valued accumulator part + MmaOperandC *accum = reinterpret_cast(&D) + + (m + n * MmaIterations::kRow) + MmaIterations::kCount; + + mma(*accum, operand_A[m+MmaIterations::kRow], operand_B[n], *accum); + } + } + } + + /// Transform the mma operands to the required types + CUTLASS_DEVICE + void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B, + FragmentA const &A, FragmentB const &B) const { + // Alias types for underlying real-valued matrix multiply operator + using InstMmaOperandA = typename ArchMmaOperator::FragmentA; + using InstMmaOperandB = typename ArchMmaOperator::FragmentB; + + // + // Define conversions from source type to instruction operands' type + // + + #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 900 + FloatRoundStyle const kRoundA = FloatRoundStyle::round_to_nearest; + FloatRoundStyle const kRoundB = FloatRoundStyle::round_to_nearest; + #else + FloatRoundStyle const kRoundA = FloatRoundStyle::round_half_ulp_trunc_dntz; + FloatRoundStyle const kRoundB = FloatRoundStyle::round_half_ulp_trunc_dntz; + #endif + + detail::UnpackComplexConvertAndPackForMma < + RealElementA, + InstMmaOperandA, + FragmentA, + MmaIterations, + MatrixShape<2, 2>, + kTransformA, + Operand::kA, + kRoundA> convert_A; + + detail::UnpackComplexConvertAndPackForMma < + RealElementB, + InstMmaOperandB, + FragmentB, + MmaIterations, + MatrixShape<2, 1>, + kTransformB, + Operand::kB, + kRoundB> convert_B; + + // Convert Fragment[A|B] holding complex to InstMmaOperand[A|B] holding InstMmaOperand[A|B]::Element + convert_A(reinterpret_cast(&dst_A), A); + convert_B(reinterpret_cast(&dst_B), B); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// +/// Partial specialization for complex*complex+complex => complex: +// Operands data 
type: complex +// Math instruction: mma.sync.aligned.m16n8k4.f64.f64.f64.f64 +// Output data type: complex +// +///////////////////////////////////////////////////////////////////////////////////////////////// +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA_, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB_, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC_, + /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) + typename Policy_, + /// Complex transform on A operand + ComplexTransform TransformA, + /// Complex transform on B operand + ComplexTransform TransformB +> +class MmaComplexTensorOp< + Shape_, + complex, + LayoutA_, + complex, + LayoutB_, + complex, + LayoutC_, + Policy_, + TransformA, + TransformB, + true> { +public: + /// Shape of warp-level matrix operation (concept: GemmShape) + using Shape = Shape_; + + /// Data type of members of complex multiplicand A + using RealElementA = double; + + /// Data type of multiplicand A + using ElementA = complex; + + /// Layout of multiplicand A + using LayoutA = LayoutA_; + + /// Data type of members of complex multiplicand B + using RealElementB = double; + + /// Data type of multiplicand B + using ElementB = complex; + + /// Layout of multiplicand B + using LayoutB = LayoutB_; + + /// Data type of members of complex accumulator matrix C + using RealElementC = double; + + /// Data type of accumulator matrix C + using ElementC = complex; + + /// Layout of accumulator matrix C + using LayoutC = LayoutC_; + + /// Shape of the warp in units of thread (concept: MmaLanePolicyTensorOp) + using Policy = Policy_; + + /// Underlying matrix multiply operator (concept: arch::Mma) + using ArchMmaOperator = typename Policy::Operator; + + /// Shape of underlying instruction + using InstructionShape = typename ArchMmaOperator::Shape; + + /// Underlying arch tag + using ArchTag = typename ArchMmaOperator::ArchTag; + + /// Indicates class of matrix operator + using OperatorClass = arch::OpClassTensorOp; + + /// Indicates math operator + using MathOperator = typename arch::OpMultiplyAddComplex; + + /// Complex transform on A operand + static ComplexTransform const kTransformA = TransformA; + + /// Complex transform on B operand + static ComplexTransform const kTransformB = TransformB; + + /// Number of threads participating in warp-level matrix product + static int const kThreadCount = 32; + +public: + + /// Iterates over the A operand in memory + using IteratorA = MmaTensorOpMultiplicandTileIterator< + MatrixShape, + Operand::kA, + ElementA, + LayoutA, + MatrixShape, + Policy::OpDelta::kRow, + 32, + 1 + >; + + /// Storage for A tile + using FragmentA = typename IteratorA::Fragment; + + /// Storage for transformed A tile + using TransformedFragmentA = FragmentA; + + /// Iterates over the B operand in memory + using IteratorB = MmaTensorOpMultiplicandTileIterator< + MatrixShape, + Operand::kB, + ElementB, + LayoutB, + MatrixShape, + Policy::OpDelta::kColumn, + 32, + 1 + >; + + /// Storage for B tile + using FragmentB = typename IteratorB::Fragment; + + /// Storage for transformed B tile + using TransformedFragmentB = FragmentB; + + static_assert( + !(Shape::kM % ArchMmaOperator::Shape::kM) && + !(Shape::kN % ArchMmaOperator::Shape::kN), + "Shape of warp-level Mma must be divisible by operator shape."); + + /// Number of mma operations performed + using MmaIterations = MatrixShape< + 
Shape::kM / ArchMmaOperator::Shape::kM, + Shape::kN / ArchMmaOperator::Shape::kN + >; + + /// Iterates over the C operand in memory + using IteratorC = MmaTensorOpAccumulatorTileIterator< + MatrixShape, + ElementC, + LayoutC, + typename ArchMmaOperator::Shape, + typename Policy::OpDelta>; + + /// Storage for C tile, the accumulator. Note, regardless of multiplicand type, this + /// storage arrangement is to be considered 'planar complex' in the sense that all real-valued + /// parts are stored consecutively followed by all imaginary parts. This matches the structure + /// of Tensor Cores which are always real-valued matrix multiplies. + using FragmentC = typename IteratorC::Fragment; + + static_assert( + FragmentC::kElements == 2 * MmaIterations::kCount * ArchMmaOperator::FragmentC::kElements, + "Unexpected planar complex fragment length."); + +private: + + // + // Data members + // + + /// Underlying real-valued matrix multiply operator (concept: arch::Mma) + ArchMmaOperator mma; + +public: + + // + // Methods + // + + /// Ctor + CUTLASS_DEVICE + MmaComplexTensorOp() {} + + /// Performs a warp-level matrix multiply-accumulate operation + CUTLASS_DEVICE + void operator()( + FragmentC &D, + FragmentA const &A, + FragmentB const &B, + FragmentC const &C + ) const { + + // Alias types for underlying real-valued matrix multiply operator + using MmaOperandA = typename ArchMmaOperator::FragmentA; + using MmaOperandB = typename ArchMmaOperator::FragmentB; + using MmaOperandC = typename ArchMmaOperator::FragmentC; + + D = C; + + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < MmaIterations::kRow; ++m) { + + // mma(accum.real(), a.real(), b.real(), accum.real()); + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < MmaIterations::kColumn; ++n) { + + // Pack operands together. This may result in actual MOVs + MmaOperandA operand_A; + MmaOperandB operand_B; + + CUTLASS_PRAGMA_UNROLL + for (int mk = 0; mk < MmaOperandA::kElements; ++mk) + operand_A[mk] = A[m*MmaOperandA::kElements + mk].real(); + + CUTLASS_PRAGMA_UNROLL + for (int nk = 0; nk < MmaOperandB::kElements; ++nk) + operand_B[nk] = B[n*MmaOperandB::kElements + nk].real(); + + // Real-valued accumulator part + MmaOperandC *accum = reinterpret_cast(&D) + + (m + n * MmaIterations::kRow); + + mma(*accum, operand_A, operand_B, *accum); + } + + // mma(accum.imag(), a.real(), b.imag(), accum.imag()); + CUTLASS_PRAGMA_UNROLL + for (int n = MmaIterations::kColumn - 1; n >= 0; --n) { + + // Pack operands together. This may result in actual MOVs + MmaOperandA operand_A; + MmaOperandB operand_B; + + CUTLASS_PRAGMA_UNROLL + for (int mk = 0; mk < MmaOperandA::kElements; ++mk) + operand_A[mk] = A[m*MmaOperandA::kElements + mk].real(); + + CUTLASS_PRAGMA_UNROLL + for (int nk = 0; nk < MmaOperandB::kElements; ++nk) + operand_B[nk] = (kTransformB == ComplexTransform::kConjugate ? + -B[n*MmaOperandB::kElements + nk].imag() : B[n*MmaOperandB::kElements + nk].imag()); + + // Complex-valued accumulator part + MmaOperandC *accum = reinterpret_cast(&D) + + (m + n * MmaIterations::kRow) + MmaIterations::kCount; + + mma(*accum, operand_A, operand_B, *accum); + } + + // mma(accum.real(), -a.imag(), b.imag(), accum.real()) + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < MmaIterations::kColumn; ++n) { + + // Pack operands together. 
This may result in actual MOVs + MmaOperandA operand_A; + MmaOperandB operand_B; + + // A imaginary part is intentionally negated + CUTLASS_PRAGMA_UNROLL + for (int mk = 0; mk < MmaOperandA::kElements; ++mk) + operand_A[mk] = (kTransformA == ComplexTransform::kConjugate ? + A[m*MmaOperandA::kElements + mk].imag() : -A[m*MmaOperandA::kElements + mk].imag()); + + CUTLASS_PRAGMA_UNROLL + for (int nk = 0; nk < MmaOperandB::kElements; ++nk) + operand_B[nk] = (kTransformB == ComplexTransform::kConjugate ? + -B[n*MmaOperandB::kElements + nk].imag() : B[n*MmaOperandB::kElements + nk].imag()); + + // Real-valued accumulator part + MmaOperandC *accum = reinterpret_cast(&D) + + (m + n * MmaIterations::kRow); + + mma(*accum, operand_A, operand_B, *accum); + } + + // mma(accum.imag(), a.imag(), b.real(), accum.imag()) + CUTLASS_PRAGMA_UNROLL + for (int n = MmaIterations::kColumn - 1; n >= 0; --n) { + + // Pack operands together. This may result in actual MOVs + MmaOperandA operand_A; + MmaOperandB operand_B; + + CUTLASS_PRAGMA_UNROLL + for (int mk = 0; mk < MmaOperandA::kElements; ++mk) + operand_A[mk] = (kTransformA == ComplexTransform::kConjugate ? + -A[m*MmaOperandA::kElements + mk].imag() : A[m*MmaOperandA::kElements + mk].imag()); + + CUTLASS_PRAGMA_UNROLL + for (int nk = 0; nk < MmaOperandB::kElements; ++nk) + operand_B[nk] = B[n*MmaOperandB::kElements + nk].real(); + + // Complex-valued accumulator part + MmaOperandC *accum = reinterpret_cast(&D) + + (m + n * MmaIterations::kRow) + MmaIterations::kCount; + + mma(*accum, operand_A, operand_B, *accum); + } + } + } + + /// Transform the mma operands to the required types + CUTLASS_DEVICE + void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B, + FragmentA const &A, FragmentB const &B) const { + dst_A = A; + dst_B = B; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace warp +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_complex_tensor_op_fast_f32.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_complex_tensor_op_fast_f32.h new file mode 100644 index 0000000000000000000000000000000000000000..4db983d6dd23acd2782daccea2b25fc9d275f3ce --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_complex_tensor_op_fast_f32.h @@ -0,0 +1,663 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief Templates implementing warp-level matrix multiply-accumulate operations targeting + Tensor Cores. +*/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/array.h" +#include "cutlass/complex.h" +#include "cutlass/numeric_types.h" +#include "cutlass/matrix_shape.h" +#include "cutlass/functional.h" + +#include "cutlass/arch/memory_sm75.h" +#include "cutlass/arch/mma_sm75.h" +#include "cutlass/arch/mma_sm80.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/warp/mma.h" + +#include "cutlass/gemm/warp/mma_tensor_op_policy.h" +#include "cutlass/gemm/warp/mma_tensor_op.h" + +#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h" +#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h" +#include "cutlass/gemm/warp/mma_complex_tensor_op_tile_iterator_sm80.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace warp { + +namespace detail { + +template < + /// Data type of real & imag members of complex numbers in the SourceFragment + typename RealElement, + /// Destination fragment required by the mma operation + typename DestinationFragment, + /// Source fragment holding complex elements + typename SourceFragment, + /// Number of mma operations performed + typename MmaIterations, + /// Shape of operand elements + typename MmaOperandShape, + /// Complex transform on A operand + ComplexTransform Transform_, + /// Operand A or Operand B + Operand Operand_, + /// Floating-point rounding style for big part + FloatRoundStyle RoundBig_, + /// Floating-point rounding style for small part + FloatRoundStyle RoundSmall_> +struct UnpackComplexConvertAndPackForMmaFastF32; + +// Partial specialization for OperandA and Congruous smem layout +template < + typename RealElement, + typename DestinationFragment, + typename SourceFragment, + typename MmaIterations, + typename MmaOperandShape, + ComplexTransform Transform_, + FloatRoundStyle RoundBig_, + FloatRoundStyle RoundSmall_> +struct UnpackComplexConvertAndPackForMmaFastF32 < + RealElement, + DestinationFragment, + SourceFragment, + MmaIterations, + MmaOperandShape, + Transform_, + Operand::kA, + RoundBig_, + RoundSmall_> { + + // + // Type definitions + // + static Operand const kOperand = Operand::kA; + static ComplexTransform const kTransform = Transform_; + static FloatRoundStyle const kRoundBig = RoundBig_; + static 
FloatRoundStyle const kRoundSmall = RoundSmall_; + + // Data type of elements in the destination fragment + using MmaElement = typename DestinationFragment::Element; + + // Numeric convertor MmaElementBig, MmaElementSmall <= RealElement + using Converter = NumericConverterFastF32; + + // Operand layout parameters + using SourceFragmentLayout = layout::ColumnMajor; + static int const kLdm = MmaIterations::kRow * MmaOperandShape::kRow; + + // BigSmall Fragment holding two TF32 elements (big, small) for every float + using BigSmallFragment = Array; + + /// Index in fargments for the big and small part + static int const kBigIndex = 0; + static int const kSmallIndex = 1; + + /// Ctor + CUTLASS_DEVICE + UnpackComplexConvertAndPackForMmaFastF32() {} + + CUTLASS_DEVICE + void operator()(DestinationFragment *dest, SourceFragment const &source) { + + Converter convert_op; + SourceFragmentLayout layout(kLdm); + + DestinationFragment *dest_big_ = reinterpret_cast(dest); + DestinationFragment *dest_small_ = reinterpret_cast(&dest[MmaIterations::kRow * 2]); + + CUTLASS_PRAGMA_UNROLL + for(int i=0; i and apply rounding on real and imag parts + BigSmallFragment a = convert_op(source[layout(MatrixCoord{row,col})].real()); + BigSmallFragment b = convert_op(source[layout(MatrixCoord{row,col})].imag()); + + // Unpack rounded complex and pack into DestinationFragment for mma operation + dest_big_[i][pos] = a[kBigIndex]; + dest_big_[i+MmaIterations::kRow][pos] = (kTransform == ComplexTransform::kConjugate ? -b[kBigIndex] : b[kBigIndex]); + + // Unpack rounded complex and pack into DestinationFragment for mma operation + dest_small_[i][pos] = a[kSmallIndex]; + dest_small_[i+MmaIterations::kRow][pos] = (kTransform == ComplexTransform::kConjugate ? -b[kSmallIndex] : b[kSmallIndex]); + + // Next position + pos++; + } + } + } + } +}; + +// Partial specialization for OperandB and Congruous smem layout +template < + typename RealElement, + typename DestinationFragment, + typename SourceFragment, + typename MmaIterations, + typename MmaOperandShape, + ComplexTransform Transform_, + FloatRoundStyle RoundBig_, + FloatRoundStyle RoundSmall_> +struct UnpackComplexConvertAndPackForMmaFastF32 < + RealElement, + DestinationFragment, + SourceFragment, + MmaIterations, + MmaOperandShape, + Transform_, + Operand::kB, + RoundBig_, + RoundSmall_> { + + // + // Type definitions + // + static Operand const kOperand = Operand::kB; + static ComplexTransform const kTransform = Transform_; + static FloatRoundStyle const kRoundBig = RoundBig_; + static FloatRoundStyle const kRoundSmall = RoundSmall_; + + // Data type of elements in the destination fragment + using MmaElement = typename DestinationFragment::Element; + + // Numeric convertor MmaElementBig, MmaElementSmall <= RealElement + using Converter = NumericConverterFastF32; + + // Operand layout parameters + using SourceFragmentLayout = layout::RowMajor; + static int const kLdm = MmaIterations::kColumn * MmaOperandShape::kColumn; + + // BigSmall Fragment holding two TF32 elements (big, small) for every float + using BigSmallFragment = Array; + + /// Index in fargments for the big and small part + static int const kBigIndex = 0; + static int const kSmallIndex = 1; + + /// Ctor + CUTLASS_DEVICE + UnpackComplexConvertAndPackForMmaFastF32() {} + + CUTLASS_HOST_DEVICE + void operator()(DestinationFragment *dest, SourceFragment const &source) { + + Converter convert_op; + SourceFragmentLayout layout(kLdm); + + DestinationFragment *dest_big_ = reinterpret_cast(dest); + DestinationFragment 
*dest_small_ = reinterpret_cast(&dest[MmaIterations::kColumn * 2]); + + CUTLASS_PRAGMA_UNROLL + for(int i=0; i apply rounding on real and imag parts + BigSmallFragment a = convert_op(source[layout(MatrixCoord{row,col})].real()); + BigSmallFragment b = convert_op(source[layout(MatrixCoord{row,col})].imag()); + + // Unpack rounded complex and pack into DestinationFragment for mma operation + dest_big_[i][pos] = a[kBigIndex]; + dest_big_[i+MmaIterations::kColumn][pos] = (kTransform == ComplexTransform::kConjugate ? -b[kBigIndex] : b[kBigIndex]); + + // Unpack rounded complex and pack into DestinationFragment for mma operation + dest_small_[i][pos] = a[kSmallIndex]; + dest_small_[i+MmaIterations::kColumn][pos] = (kTransform == ComplexTransform::kConjugate ? -b[kSmallIndex] : b[kSmallIndex]); + + // next position + pos++; + } + } + } + } +}; +} // namespace detail + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + /// Data type of A elements + typename RealElementA, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA_, + /// Data type of B elements + typename RealElementB, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB_, + /// Element type of C matrix + typename RealElementC, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC_, + /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) + typename Policy_, + /// Complex transform on A operand + ComplexTransform TransformA = ComplexTransform::kNone, + /// Complex transform on B operand + ComplexTransform TransformB = ComplexTransform::kNone, + /// Used for partial specialization + typename Enable = bool +> +class MmaComplexTensorOpFastF32; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for complex*complex+complex => complex: +// Operands data type: complex +// Rounding: float -> tfloat32_t (round half_ulp_truncate nearest) +// Math instruction: mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32 +// Output data type: complex +// +///////////////////////////////////////////////////////////////////////////////////////////////// +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA_, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB_, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC_, + /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) + typename Policy_, + /// Complex transform on A operand + ComplexTransform TransformA, + /// Complex transform on B operand + ComplexTransform TransformB, + /// Used for partial specialization + typename Enable +> +class MmaComplexTensorOpFastF32< + Shape_, + complex, + LayoutA_, + complex, + LayoutB_, + complex, + LayoutC_, + Policy_, + TransformA, + TransformB, + Enable> { +public: + /// Shape of warp-level matrix operation (concept: GemmShape) + using Shape = Shape_; + + /// Data type of members of complex multiplicand A + using RealElementA = float; + + /// Data type of multiplicand A + using ElementA = complex; + + /// Layout of multiplicand A + using LayoutA = LayoutA_; + + /// Data type of members of complex multiplicand B + using RealElementB = float; + + /// Data type of multiplicand B + using ElementB = complex; + + /// Layout of multiplicand B + using 
LayoutB = LayoutB_; + + /// Data type of members of complex accumulator matrix C + using RealElementC = float; + + /// Data type of accumulator matrix C + using ElementC = complex; + + /// Layout of accumulator matrix C + using LayoutC = LayoutC_; + + /// Shape of the warp in units of thread (concept: MmaLanePolicySimt) + using Policy = Policy_; + + /// Underlying matrix multiply operator (concept: arch::Mma) + using ArchMmaOperator = typename Policy::Operator; + + /// Shape of underlying instruction + using InstructionShape = typename ArchMmaOperator::Shape; + + /// Underlying arch tag + using ArchTag = typename ArchMmaOperator::ArchTag; + + /// Indicates class of matrix operator + using OperatorClass = arch::OpClassTensorOp; + + /// Indicates math operator + using MathOperator = arch::OpMultiplyAddComplexFastF32; + + /// Complex transform on A operand + static ComplexTransform const kTransformA = TransformA; + + /// Complex transform on B operand + static ComplexTransform const kTransformB = TransformB; + + /// Number of threads participating in warp-level matrix product + static int const kThreadCount = 32; + + + /// Tune F32 to TF32 big small conversion for complex operation + /// Different combination of big small conversin can cause different tradeoff + /// between speed and accuracy. Generally, use round_half_ulp_truncate can + /// improve the performance but hur the accuracy. + using ComplexFastF32 = FastF32 < + FloatRoundStyle::round_toward_zero, // kRoundBigA + FloatRoundStyle::round_half_ulp_truncate, // kRoundSmallA + FloatRoundStyle::round_toward_zero, // kRoundBigB + FloatRoundStyle::round_half_ulp_truncate, // kRoundSmallB + TensorFloat32Op::k3xTF32 // Number of TF32 operations + >; + + /// Index in fargments for the big and small part + static int const kBigIndex = 0; + static int const kSmallIndex = 1; + +public: + + /// Iterates over the A operand in memory + using IteratorA = MmaTensorOpMultiplicandTileIterator< + MatrixShape, + Operand::kA, + ElementA, + LayoutA, + MatrixShape, + Policy::OpDelta::kRow, + 32, + 1 + >; + + /// Storage for A tile + using FragmentA = typename IteratorA::Fragment; + + /// Storage for transformed A tile + // (4 times the original FragmentA::kElements) + // (real_big), (imag_big), (real_small), (imag_small) + using TransformedFragmentA = Array; + + // Fragment bisecting big and small sections + // (real_big, imag_big), (real_small, imag_small) + using AccessTypeFragmentA = Array; + + /// Iterates over the B operand in memory + using IteratorB = MmaTensorOpMultiplicandTileIterator< + MatrixShape, + Operand::kB, + ElementB, + LayoutB, + MatrixShape, + Policy::OpDelta::kColumn, + 32, + 1 + >; + + /// Storage for B tile + using FragmentB = typename IteratorB::Fragment; + + /// Storage for transformed B tile + // (4 times the original FragmentB::kElements) + // (real_big), (imag_big), (real_small), (imag_small) + using TransformedFragmentB = Array; + + // Fragment bisecting big and small sections + // (real_big, imag_big), (real_small, imag_small) + using AccessTypeFragmentB = Array; + + static_assert( + !(Shape::kM % ArchMmaOperator::Shape::kM) && + !(Shape::kN % ArchMmaOperator::Shape::kN), + "Shape of warp-level Mma must be divisible by operator shape."); + + /// Number of complex products operations performed (one complex product needs four mma instructions) + using MmaIterations = MatrixShape< + Shape::kM / ArchMmaOperator::Shape::kM, + Shape::kN / ArchMmaOperator::Shape::kN + >; + + /// Iterates over the C operand in memory + using IteratorC 
= MmaTensorOpAccumulatorTileIterator< + MatrixShape, + ElementC, + LayoutC, + typename ArchMmaOperator::Shape, + typename Policy::OpDelta>; + + /// Storage for C tile, the accumulator. Note, regardless of multiplicand type, this + /// storage arrangement is to be considered 'planar complex' in the sense that all real-valued + /// parts are stored consecutively followed by all imaginary parts. This matches the structure + /// of Tensor Cores which are always real-valued matrix multiplies. + using FragmentC = typename IteratorC::Fragment; + + // + // Alias types for underlying real-valued matrix multiply operator + // + using InstMmaOperandA = typename ArchMmaOperator::FragmentA; + using InstMmaOperandB = typename ArchMmaOperator::FragmentB; + using MmaOperandC = typename ArchMmaOperator::FragmentC; + + static_assert(platform::is_same, typename ArchMmaOperator::Shape>::value, + "This implementation only supports mma.m16n8k8 math instructions."); + + static_assert(InstMmaOperandA::kElements == 4, + "This implementation only supports math instructions in which exactly four element is needed for the A operand." + "We can geneneralize later."); + + static_assert(InstMmaOperandB::kElements == 2, + "This implementation only supports math instructions in which exactly two element is needed for the B operand." + "We can geneneralize later."); + +private: + + // + // Data members + // + + /// Underlying real-valued matrix multiply operator (concept: arch::Mma) + ArchMmaOperator mma; + +public: + + // + // Methods + // + + /// Ctor + CUTLASS_DEVICE + MmaComplexTensorOpFastF32() {} + + /// Performs a warp-level matrix multiply-accumulate operation + CUTLASS_DEVICE + void operator()( + FragmentC &D, + TransformedFragmentA const &A, + TransformedFragmentB const &B, + FragmentC const &C + ) const { + + AccessTypeFragmentA const *complex_A = reinterpret_cast(&A); + AccessTypeFragmentB const *complex_B = reinterpret_cast(&B); + + // + // Accumulate in place + // + D = C; + + + complex_mma_operator(D, complex_A[kSmallIndex], complex_B[kBigIndex], D); + + complex_mma_operator(D, complex_A[kBigIndex], complex_B[kSmallIndex], D); + + complex_mma_operator(D, complex_A[kBigIndex], complex_B[kBigIndex], D); + + if (ComplexFastF32::kPrecision == TensorFloat32Op::k4xTF32) + complex_mma_operator(D, complex_A[kSmallIndex], complex_B[kSmallIndex], D); + } + + /// Performs a warp-level matrix multiply-accumulate operation + CUTLASS_DEVICE + void complex_mma_operator( + FragmentC &D, + AccessTypeFragmentA const &complex_A, + AccessTypeFragmentB const &complex_B, + FragmentC const &C + ) const { + + // Instruction Operands A & B holding real part followed by imaginary part for mma operations + InstMmaOperandA const *operand_A = reinterpret_cast(&complex_A); + InstMmaOperandB const *operand_B = reinterpret_cast(&complex_B); + + + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < MmaIterations::kRow; ++m) { + + // mma(accum.real(), a.real(), b.real(), accum.real()); + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < MmaIterations::kColumn; ++n) { + + // Real-valued accumulator part + MmaOperandC *accum = reinterpret_cast(&D) + + (m + n * MmaIterations::kRow); + + mma(*accum, operand_A[m], operand_B[n], *accum); + } + + // mma(accum.imag(), a.real(), b.imag(), accum.imag()); + CUTLASS_PRAGMA_UNROLL + for (int n = MmaIterations::kColumn - 1; n >= 0; --n) { + + // Complex-valued accumulator part + MmaOperandC *accum = reinterpret_cast(&D) + + (m + n * MmaIterations::kRow) + MmaIterations::kCount; + + mma(*accum, operand_A[m], 
operand_B[n+MmaIterations::kColumn], *accum); + } + + // mma(accum.real(), a.imag(), -b.imag(), accum.real()) + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < MmaIterations::kColumn; ++n) { + + // negate OperandB to accumulate -(a.imag()*b.imag()) + // negating OperandB emits less instrucitons than negating OperandA as OperandB has less elements + negate negate_op; + + // Real-valued accumulator part + MmaOperandC *accum = reinterpret_cast(&D) + + (m + n * MmaIterations::kRow); + + mma(*accum, operand_A[m+MmaIterations::kRow], negate_op(operand_B[n+MmaIterations::kColumn]), *accum); + } + + // mma(accum.imag(), a.imag(), b.real(), accum.imag()) + CUTLASS_PRAGMA_UNROLL + for (int n = MmaIterations::kColumn - 1; n >= 0; --n) { + + // Complex-valued accumulator part + MmaOperandC *accum = reinterpret_cast(&D) + + (m + n * MmaIterations::kRow) + MmaIterations::kCount; + + mma(*accum, operand_A[m+MmaIterations::kRow], operand_B[n], *accum); + } + } + } + + /// Transform the mma operands to the required types + CUTLASS_DEVICE + void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B, + FragmentA const &A, FragmentB const &B) const { + + detail::UnpackComplexConvertAndPackForMmaFastF32 < + RealElementA, + InstMmaOperandA, + FragmentA, + MmaIterations, + MatrixShape<2, 2>, + kTransformA, + Operand::kA, + ComplexFastF32::kRoundBigA, + ComplexFastF32::kRoundSmallA> convert_A; + + detail::UnpackComplexConvertAndPackForMmaFastF32 < + RealElementB, + InstMmaOperandB, + FragmentB, + MmaIterations, + MatrixShape<2, 1>, + kTransformB, + Operand::kB, + ComplexFastF32::kRoundBigB, + ComplexFastF32::kRoundSmallB> convert_B; + + // Convert Fragment[A|B] holding complex to InstMmaOperand[A|B] holding InstMmaOperand[A|B]::Element + convert_A(reinterpret_cast(&dst_A), A); + convert_B(reinterpret_cast(&dst_B), B); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace warp +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_complex_tensor_op_tile_iterator_sm80.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_complex_tensor_op_tile_iterator_sm80.h new file mode 100644 index 0000000000000000000000000000000000000000..02fd4c077f0b472e3eca68386a63dc50eff7affa --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_complex_tensor_op_tile_iterator_sm80.h @@ -0,0 +1,2485 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Defines iterators used by warp-level matrix multiply operations targeting Tensor Cores. +*/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/array.h" +#include "cutlass/numeric_types.h" +#include "cutlass/tensor_ref.h" +#include "cutlass/matrix_shape.h" + +#include "cutlass/arch/memory_sm75.h" +#include "cutlass/gemm/gemm.h" + +#include "cutlass/layout/matrix.h" +#include "cutlass/layout/tensor.h" +#include "cutlass/layout/pitch_linear.h" +#include "cutlass/layout/tensor_op_multiplicand_sm80.h" + +#include "cutlass/platform/platform.h" +#include "cutlass/fast_math.h" + +#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace warp { + +//////////////////////////////////////////////////////////////////////////////// + +/// This tile iterator is specialized for loading 128b vectors of 128b elements. 
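As a concrete instance of the 128-bit elements mentioned above, these Congruous128b iterators are typically instantiated for complex<double> operands, which occupy exactly 128 bits, so each element maps to one aligned 128-bit access. The small check below uses std::complex<double> as a stand-in that shares the storage size of cutlass::complex<double>.

#include <complex>
#include <cstdio>

int main() {
  // complex<double> = two 8-byte doubles = 128 bits, i.e. one element per
  // 128-bit access (kElementsPerAccess == 1 in the iterator below).
  static_assert(sizeof(std::complex<double>) * 8 == 128,
                "complex<double> is a 128-bit element");
  std::printf("bits per complex<double>: %zu\n", sizeof(std::complex<double>) * 8);
  return 0;
}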
+/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: PitchLinearShape) + typename Shape_, + /// Identifies A or B multiplicand + Operand Operand_, + /// Data type of elements + typename Element_, + /// Shape of one matrix product operation (concept: PitchLinearShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions) + int OpDelta_, + /// Number of partitions along K dimension + int PartitionsK_> +class MmaTensorOpMultiplicandTileIterator< + Shape_, Operand_, Element_, + cutlass::layout::TensorOpMultiplicandCongruous128b, + InstructionShape_, OpDelta_, 32, PartitionsK_> { + public: + + /// Shape of tile to load (concept: PitchLinearShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand_; + + static_assert(kOperand == Operand::kA || kOperand== Operand::kB, + "MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma."); + + static_assert(!(Shape::kContiguous % 8) && !(Shape::kStrided % 4), "Divisibility."); + + static_assert(sizeof_bits::value == 128, "This is specialized for 128b accesses."); + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = cutlass::layout::TensorOpMultiplicandCongruous128b; + + /// Shape of one matrix product operation (concept: GemmShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) + static int const kOpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// Number of partitions along K dimension + static int const kPartitionsK = PartitionsK_; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Long Index type + using StrideIndex = typename TensorRef::Layout::Stride::Index; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Load two elements per access + static int const kElementsPerAccess = 1; + + /// Policy defining internal details of tile iterator + struct Policy { + + /// Shape of one access + using Delta = layout::PitchLinearShape<8, 4>; + + /// Number of iterations to load + using Iterations = layout::PitchLinearShape< + Shape::kContiguous / Delta::kContiguous, + InstructionShape::kStrided / Delta::kStrided + >; + }; + +private: + + /// Not working on this feature at the moment. 
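To make the Policy above concrete, the small host-side sketch below uses assumed example sizes (Shape::kContiguous = 32, InstructionShape::kStrided = 4; real instantiations may differ). It shows that the per-lane fragment defined a little further down, Shape::kContiguous * InstructionShape::kStrided / 32 elements, covers exactly Iterations accesses of kElementsPerAccess elements each.

#include <cstdio>

int main() {
  // Assumed example tile sizes; Delta values come from the Policy above.
  constexpr int kShapeContiguous = 32;
  constexpr int kInstructionStrided = 4;
  constexpr int kDeltaContiguous = 8;
  constexpr int kDeltaStrided = 4;
  constexpr int kThreads = 32;
  constexpr int kElementsPerAccess = 1;

  constexpr int kItersContiguous = kShapeContiguous / kDeltaContiguous;  // 4
  constexpr int kItersStrided = kInstructionStrided / kDeltaStrided;     // 1
  constexpr int kFragmentElements =
      kShapeContiguous * kInstructionStrided / kThreads;                 // 4

  static_assert(kFragmentElements ==
                    kItersContiguous * kItersStrided * kElementsPerAccess,
                "per-lane fragment covers all accesses");
  std::printf("iterations %d x %d, fragment elements %d\n",
              kItersContiguous, kItersStrided, kFragmentElements);
  return 0;
}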
+ static_assert(kOpDelta == 1, + "Alternative arrangements not supported at present."); + + /// Pointer type used for accesses + using AccessType = AlignedArray; + +public: + + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = + Array; + +private: + + /// Layout object storing stride values + StrideIndex stride_; + + /// Shared memory base pointers - not advanced + AccessType const *pointer_; + + /// Byte offset incremented as iterator advances + Index byte_offset_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0) { } + + /// Constructor from TensorRef + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator( + TensorRef const &ref, + int lane_id + ): + stride_(ref.stride(0) / kElementsPerAccess), byte_offset_(0) { + + int quad_pair = lane_id / 8; + int quad = lane_id / 4; + int lane = lane_id % 4; + + int row = (quad & 1) * 4 + (lane ^ quad_pair); + + byte_offset_ = (row + quad_pair * stride_) * sizeof(AccessType); + + pointer_= reinterpret_cast(ref.data()); + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { + + pointer_ += offset; + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { + + int offset = + (tile_offset.contiguous() * Shape::kContiguous) + + (tile_offset.strided() * InstructionShape::kStrided * stride_); + + add_pointer_offset(offset); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator++() { + + pointer_ += stride_ * InstructionShape::kStrided; + + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { + add_tile_offset(tile_offset); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. 
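The constructor above derives each lane's starting location from its lane_id with a small integer swizzle. The standalone loop below reproduces that arithmetic and prints the lane-to-offset mapping; stride is an assumed example value, and offsets are counted in units of AccessType rather than bytes.

#include <cstdio>

int main() {
  int const stride = 16;  // assumed example stride, in AccessType units
  for (int lane_id = 0; lane_id < 32; ++lane_id) {
    // Same arithmetic as the iterator constructor above.
    int quad_pair = lane_id / 8;
    int quad = lane_id / 4;
    int lane = lane_id % 4;
    int row = (quad & 1) * 4 + (lane ^ quad_pair);
    int start = row + quad_pair * stride;   // starting offset for this lane
    std::printf("lane %2d -> row %d, start %d\n", lane_id, row, start);
  }
  return 0;
}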
+ CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + + load_with_byte_offset(frag, 0); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset in units of bytes + Index byte_offset) const { + + AccessType *fetch_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < Policy::Iterations::kStrided; ++s) { + + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < Policy::Iterations::kContiguous; ++c) { + + int access_idx = c + s * Policy::Iterations::kContiguous; + + AccessType const *source_ptr = pointer_ + + Policy::Delta::kContiguous * c + + Policy::Delta::kStrided * s * stride_; + + char const *source_byte_ptr = reinterpret_cast(source_ptr) + byte_offset + byte_offset_; + + AccessType const *source = reinterpret_cast(source_byte_ptr); + + fetch_ptr[access_idx] = *source; + } + } + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index pointer_offset) const { + + load_with_byte_offset(frag, pointer_offset * sizeof(Element)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset) const { + + load_with_byte_offset(frag, tile_offset, 0); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + + load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index byte_offset) const { + Index pointer_offset = + tile_offset.contiguous() * Shape::kContiguous + + tile_offset.strided() * InstructionShape::kStrided * stride_; + + byte_offset += sizeof(AccessType) * pointer_offset; + + load_with_byte_offset(frag, byte_offset); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. 
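+
+  // How the tile-offset overloads above collapse to a single byte offset: a worked example with
+  // assumed values (Shape::kContiguous = 16, InstructionShape::kStrided = 4, stride_ = 16
+  // AccessType units), illustrative only:
+  //
+  //   Index c = 1, s = 2;                            // tile_offset.contiguous(), tile_offset.strided()
+  //   Index pointer_offset = c * 16 + s * 4 * 16;    // = 16 + 128 = 144 accesses
+  //   Index byte_offset    = pointer_offset * 16;    // sizeof(AccessType) == 16 B for 128b elements
+  //                                                  // = 2304 bytes added on top of byte_offset_
+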
+ CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + + } +}; + +//////////////////////////////////////////////////////////////////////////////// +/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Identifies A or B multiplicand + Operand Operand_, + /// Data type of elements + typename Element_, + /// Shape of one matrix product operation (concept: MatrixShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions) + int OpDelta_, + /// Number of partitions along K dimension + int PartitionsK_> +class MmaTensorOpMultiplicandTileIterator< + Shape_, Operand_, Element_, + cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b, + InstructionShape_, OpDelta_, 32, PartitionsK_> { + public: + + /// Shape of tile to load (concept: PitchLinearShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand_; + + static_assert(kOperand == Operand::kA || kOperand== Operand::kB, + "MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma."); + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b; + + /// Shape of one matrix product operation (concept: MatrixShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) + static int const kOpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Long Index type + using StrideIndex = typename TensorRef::Layout::Stride::Index; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Underlying tile iterator implementation + using Base = MmaTensorOpMultiplicandTileIterator< + layout::PitchLinearShape, kOperand, Element, + layout::TensorOpMultiplicandCongruous128b, + layout::PitchLinearShape, + kOpDelta, kThreads, PartitionsK_>; + + public: + + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = typename Base::Fragment; + +private: + + /// Underlying tile iterator + Base iterator_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator() { } + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator( + TensorRef const &ref, + int lane_id + ): iterator_({ref.data(), ref.stride()}, lane_id) { + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { + + iterator_.add_pointer_offset(offset); + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { + + iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()}); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator & operator++() { + 
+ ++iterator_; + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator & operator--() { + + --iterator_; + + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { + add_tile_offset(layout::PitchLinearCoord(tile_offset.column(), tile_offset.row())); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { + add_tile_offset(layout::PitchLinearCoord(-tile_offset.column(), -tile_offset.row())); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + + iterator_.load(frag); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index pointer_offset) const { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index byte_offset) const { + iterator_.load_with_byte_offset(frag, byte_offset); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset) const { + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index byte_offset) const { + iterator_.load_with_byte_offset( + frag, + {tile_offset.strided(), tile_offset.contiguous()}, + byte_offset); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. 
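+
+  // The row-major wrapper above is a thin adapter over the pitch-linear iterator: the logical
+  // column maps to the contiguous dimension and the logical row to the strided dimension, as in
+  // add_tile_offset() and operator+=(). A minimal sketch of that coordinate swap (illustrative
+  // only):
+  //
+  //   cutlass::MatrixCoord tile(/*row=*/2, /*column=*/3);
+  //   cutlass::layout::PitchLinearCoord pl(tile.column(), tile.row());  // (contiguous, strided) = (3, 2)
+  //   // iterator_.add_tile_offset(pl) then advances the underlying congruous-128b iterator.
+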
+ CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + iterator_.set_kgroup_index(k_group); + } +}; + +//////////////////////////////////////////////////////////////////////////////// +/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Identifies A or B multiplicand + Operand Operand_, + /// Data type of elements + typename Element_, + /// Shape of one matrix product operation (concept: MatrixShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions) + int OpDelta_, + /// Number of partitions along K dimension + int PartitionsK_> +class MmaTensorOpMultiplicandTileIterator< + Shape_, Operand_, Element_, + cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b, + InstructionShape_, OpDelta_, 32, PartitionsK_> { + public: + + /// Shape of tile to load (concept: PitchLinearShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand_; + + static_assert(kOperand == Operand::kA || kOperand== Operand::kB, + "MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma."); + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b; + + /// Shape of one matrix product operation (concept: MatrixShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) + static int const kOpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Long Index type + using StrideIndex = typename TensorRef::Layout::Stride::Index; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Underlying tile iterator implementation + using Base = MmaTensorOpMultiplicandTileIterator< + layout::PitchLinearShape, kOperand, Element, + layout::TensorOpMultiplicandCongruous128b, + layout::PitchLinearShape, + kOpDelta, kThreads, PartitionsK_>; + + public: + + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = typename Base::Fragment; + +private: + + /// Underlying tile iterator + Base iterator_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator() { } + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator( + TensorRef const &ref, + int lane_id + ): iterator_({ref.data(), ref.stride()}, lane_id) { + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { + + iterator_.add_pointer_offset(offset); + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { + + iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()}); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + 
MmaTensorOpMultiplicandTileIterator & operator++() { + + ++iterator_; + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator & operator--() { + + --iterator_; + + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { + add_tile_offset(layout::PitchLinearCoord(tile_offset.row(), tile_offset.column())); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { + add_tile_offset(layout::PitchLinearCoord(-tile_offset.row(), -tile_offset.column())); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + + iterator_.load(frag); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index pointer_offset) const { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index byte_offset) const { + iterator_.load_with_byte_offset(frag, byte_offset); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset) const { + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index byte_offset) const { + iterator_.load_with_byte_offset( + frag, + {tile_offset.contiguous(), tile_offset.strided()}, + byte_offset); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. 
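+
+  // For the column-major wrapper above the mapping is the mirror image of the row-major case:
+  // the row index is the contiguous dimension and the column index is the strided dimension, so
+  // add_tile_offset() forwards {tile_offset.row(), tile_offset.column()} in that order.
+  // Illustrative sketch:
+  //
+  //   cutlass::MatrixCoord tile(/*row=*/2, /*column=*/3);
+  //   cutlass::layout::PitchLinearCoord pl(tile.row(), tile.column());  // (contiguous, strided) = (2, 3)
+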
+ CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + iterator_.set_kgroup_index(k_group); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +///////////////////////////////////////////////////////////////////////////////////////////////// +/// +/// Partial specialization for complex +/// +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Data type of underlying field of reals. + typename RealElement, + /// Shape of one matrix product operation (concept: MatrixShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions, concept: MatrixShape) + typename OpDelta_> +class MmaTensorOpAccumulatorTileIterator< + Shape_, complex, cutlass::layout::RowMajor, InstructionShape_, OpDelta_> { + public: + + /// Shape of tile to load (concept: MatrixShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand::kC; + + /// Element type + using Element = complex; + + /// Layout of source tile + using Layout = cutlass::layout::RowMajor; + + /// Shape of one matrix product operation (concept: MatrixShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) + using OpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Long Index type + using StrideIndex = typename TensorRef::Layout::Stride::Index; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Internal structure of iterator - made public to enable introspection + struct Policy { + static_assert( + !(Shape::kRow % InstructionShape::kM) && + !(Shape::kColumn % InstructionShape::kN), + "Shape of warp-level Mma must be divisible by operator shape."); + + static_assert(platform::is_same::value, + "Layouts must be defined for logical MatrixCoord coordinate space."); + + /// Number of mma operations performed + using MmaIterations = MatrixShape; + }; + +private: + + // Assume accumulator tile is an arrangement of 8-by-8 tiles replicated over the entire + // shape, with each quad mapped to one row and each thread mapped to 1/4 of the elements + // of that row. The accumulators within one row are assumed to be consecutive. + static int const kElementsPerAccess = InstructionShape::kN / 4; + static int const kRowsPerTile = 8; + static int const kAccumulatorRows = InstructionShape::kM / kRowsPerTile; + +public: + + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile. It is assumed that the accumulators + /// are stored in a planar complex arrangement with the real parts as entirely contiguous + /// followed by the imaginary parts. 
+ using Fragment = Array; + + static int const kRealIndex = 0; + static int const kImaginaryIndex = Shape::kCount / kThreads; + +private: + + /// Reference to output tensor + TensorRef ref_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaTensorOpAccumulatorTileIterator() { } + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaTensorOpAccumulatorTileIterator( + TensorRef const &ref, + int lane_id + ): + ref_(ref) { + + int quad = (lane_id >> 2); + int lane_in_quad = (lane_id & 3); + + MatrixCoord lane_offset(quad, lane_in_quad * kElementsPerAccess); + + ref_.add_coord_offset(lane_offset); + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_HOST_DEVICE + MmaTensorOpAccumulatorTileIterator &add_pointer_offset(LongIndex offset) { + ref_.add_pointer_offset(offset); + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaTensorOpAccumulatorTileIterator &add_tile_offset(TensorCoord const &tile_offset) { + + ref_.add_coord_offset(tile_offset * make_Coord(Shape::kRow, Shape::kColumn)); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpAccumulatorTileIterator & operator++() { + // deliberate no-op + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpAccumulatorTileIterator & operator--() { + // deliberate no-op + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpAccumulatorTileIterator & operator+=(TensorCoord const &tile_offset) { + add_tile_offset(tile_offset); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpAccumulatorTileIterator & operator-=(TensorCoord const &tile_offset) { + add_tile_offset(-tile_offset); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. 
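+
+  // The fragment above uses a planar-complex arrangement: the first Shape::kCount / kThreads
+  // entries hold real parts (starting at kRealIndex) and the following Shape::kCount / kThreads
+  // entries hold the matching imaginary parts (starting at kImaginaryIndex). A minimal sketch of
+  // how a caller could reassemble complex values from a loaded fragment (illustrative only):
+  //
+  //   Fragment frag;
+  //   iter.load(frag);                        // 'iter' is an instance of this iterator
+  //   int const kCount = Shape::kCount / kThreads;
+  //   CUTLASS_PRAGMA_UNROLL
+  //   for (int i = 0; i < kCount; ++i) {
+  //     Element z(frag[kRealIndex + i], frag[kImaginaryIndex + i]);
+  //     // ... use z ...
+  //   }
+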
+  CUTLASS_HOST_DEVICE
+  void load(Fragment &frag) const {
+    load_with_pointer_offset(frag, 0);
+  }
+
+  /// Loads a fragment from memory with additional logical offset
+  CUTLASS_DEVICE
+  void load_with_pointer_offset(
+    Fragment &frag,                             ///< fragment to load from the tensor
+    Index pointer_offset) const {               ///< loads a tile with a linear offset
+
+    TensorRef offset_ref(ref_);
+    offset_ref.add_pointer_offset(pointer_offset);
+
+    CUTLASS_PRAGMA_UNROLL
+    for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
+      CUTLASS_PRAGMA_UNROLL
+      for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
+
+        int mma_accum_start = kAccumulatorRows * kElementsPerAccess *
+          (mma_n * Policy::MmaIterations::kRow + mma_m);
+
+        CUTLASS_PRAGMA_UNROLL
+        for (int row = 0; row < kAccumulatorRows; ++row) {
+          CUTLASS_PRAGMA_UNROLL
+          for (int col = 0; col < kElementsPerAccess; ++col) {
+            int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow +
+              row * kRowsPerTile;
+            int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col;
+
+            Element z = offset_ref.at({accum_m, accum_n});
+
+            frag[mma_accum_start + row * kElementsPerAccess + col + kRealIndex] = z.real();
+            frag[mma_accum_start + row * kElementsPerAccess + col + kImaginaryIndex] = z.imag();
+          }
+        }
+      }
+    }
+  }
+
+  /// Loads a fragment from memory with additional logical offset
+  CUTLASS_DEVICE
+  void load_with_byte_offset(
+    Fragment &frag,                             ///< fragment to load from the tensor
+    Index byte_offset) const {                  ///< loads a tile with a linear offset
+
+    load_with_pointer_offset(frag, byte_offset / sizeof(Element));
+  }
+
+  /// Loads a fragment from memory with logical offset in units of whole tiles.
+  CUTLASS_DEVICE
+  void load(
+    Fragment &frag,                             ///< fragment to load from the tensor
+    TensorCoord const &tile_offset) const {     ///< loads a tile with a logical offset in units of whole tiles
+
+    load(frag, tile_offset, 0);
+  }
+
+  /// Loads a fragment from memory with logical offset in units of whole tiles.
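+
+  // Worked example of the accumulator coordinate mapping used above, assuming an 8x8 instruction
+  // (InstructionShape::kM == InstructionShape::kN == 8) and unit OpDelta (illustrative only):
+  //
+  //   kElementsPerAccess = 8 / 4 = 2      // accumulators owned per thread per row
+  //   kAccumulatorRows   = 8 / 8 = 1      // one 8-row tile per MMA in the M dimension
+  //
+  //   For lane_id = 13: quad = 3, lane_in_quad = 1, so the thread's base coordinate within each
+  //   8x8 accumulator tile is MatrixCoord(3, 1 * 2) = (3, 2). For (mma_m, mma_n) = (1, 0) the
+  //   loop then visits accum_m = 1 * 8 + 0 = 8 and accum_n = 0 * 8 + {0, 1}, both relative to
+  //   that per-lane base offset.
+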
+  CUTLASS_DEVICE
+  void load(
+    Fragment &frag,                             ///< fragment to load from the tensor
+    TensorCoord const &tile_offset,             ///< loads a tile with a logical offset in units of whole tiles
+    Index pointer_offset) const {               ///< loads a tile with a logical offset AND a pointer offset
+
+    load_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset);
+  }
+
+  /// Stores a fragment to memory
+  CUTLASS_HOST_DEVICE
+  void store(Fragment const &frag) const {
+    store_with_pointer_offset(frag, 0);
+  }
+
+  /// Stores a fragment to memory with additional pointer offset
+  CUTLASS_DEVICE
+  void store_with_pointer_offset(
+    Fragment const &frag,                       ///< fragment to store from the tensor
+    Index pointer_offset) const {               ///< store a tile with a linear offset
+
+    TensorRef offset_ref(ref_);
+    offset_ref.add_pointer_offset(pointer_offset);
+
+    CUTLASS_PRAGMA_UNROLL
+    for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
+      CUTLASS_PRAGMA_UNROLL
+      for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
+
+        int mma_accum_start = kAccumulatorRows * kElementsPerAccess *
+          (mma_n * Policy::MmaIterations::kRow + mma_m);
+
+        CUTLASS_PRAGMA_UNROLL
+        for (int row = 0; row < kAccumulatorRows; ++row) {
+          CUTLASS_PRAGMA_UNROLL
+          for (int col = 0; col < kElementsPerAccess; ++col) {
+            int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow +
+              row * kRowsPerTile;
+            int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col;
+            int idx = mma_accum_start + row * kElementsPerAccess + col;
+
+            Element z(frag[kRealIndex + idx], frag[kImaginaryIndex + idx]);
+
+            offset_ref.at({accum_m, accum_n}) = z;
+          }
+        }
+      }
+    }
+  }
+
+  /// Stores a fragment to memory with additional pointer offset
+  CUTLASS_DEVICE
+  void store_with_byte_offset(
+    Fragment const &frag,                       ///< fragment to store from the tensor
+    Index byte_offset) const {                  ///< store a tile with a linear offset
+
+    store_with_pointer_offset(frag, byte_offset / sizeof(Element));
+  }
+
+  /// Stores a fragment to memory with logical offset in units of whole tiles.
+  CUTLASS_DEVICE
+  void store(
+    Fragment const &frag,                       ///< fragment to store to the tensor
+    TensorCoord const &tile_offset) const {     ///< stores a tile with a logical offset in units of whole tiles
+
+    store(frag, tile_offset, 0);
+  }
+
+  /// Stores a fragment to memory with logical offset in units of whole tiles.
+  CUTLASS_DEVICE
+  void store(
+    /// fragment to store to the tensor
+    Fragment const &frag,
+    /// stores a tile with a logical offset in units of whole tiles
+    TensorCoord const &tile_offset,
+    /// stores a tile with a logical offset AND a pointer offset
+    Index pointer_offset) const {
+    store_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset);
+  }
+};
+
+/////////////////////////////////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////////////////////////////////
+
+
+////////////////////////////////////////////////////////////////////////////////
+
+/// This tile iterator is specialized for loading 128b vectors of 128b elements.
+/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: PitchLinearShape) + typename Shape_, + /// Identifies A or B multiplicand + Operand Operand_, + /// Data type of elements + typename Element_, + /// Shape of one matrix product operation (concept: PitchLinearShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions) + int OpDelta_, + /// Number of partitions along K dimension + int PartitionsK_> +class MmaTensorOpMultiplicandTileIterator< + Shape_, Operand_, Element_, + cutlass::layout::TensorOpMultiplicandCrosswise128x4, + InstructionShape_, OpDelta_, 32, PartitionsK_> { + public: + + /// Shape of tile to load (concept: PitchLinearShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand_; + + static_assert(kOperand == Operand::kA || kOperand== Operand::kB, + "MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma."); + + static_assert(!(Shape::kContiguous % 4) && !(Shape::kStrided % 8), "Divisibility."); + + static_assert(sizeof_bits::value == 128, "This is specialized for 128b accesses."); + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = cutlass::layout::TensorOpMultiplicandCrosswise128x4; + + /// Shape of one matrix product operation (concept: GemmShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) + static int const kOpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// Number of partitions along K dimension + static int const kPartitionsK = PartitionsK_; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Long Index type + using StrideIndex = typename TensorRef::Layout::Stride::Index; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Load two elements per access + static int const kElementsPerAccess = 1; + + /// Policy defining internal details of tile iterator + struct Policy { + + /// Shape of one access + using Delta = layout::PitchLinearShape<4, 8>; + + /// Number of iterations to load + using Iterations = layout::PitchLinearShape< + InstructionShape::kContiguous / Delta::kContiguous, + Shape::kStrided / Delta::kStrided + >; + }; + +private: + + /// Not working on this feature at the moment. 
+ static_assert(kOpDelta == 1, + "Alternative arrangements not supported at present."); + + /// Pointer type used for accesses + using AccessType = AlignedArray; + +public: + + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = + Array; + +private: + + /// Layout object storing stride values + StrideIndex stride_; + + /// Shared memory base pointers - not advanced + AccessType const *pointer_; + + /// Byte offset incremented as iterator advances + Index byte_offset_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0) { } + + /// Constructor from TensorRef + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator( + TensorRef const &ref, + int lane_id + ): + stride_(ref.stride(0) / kElementsPerAccess), byte_offset_(0) { + + int quad = lane_id / 4; + int liq = lane_id % 4; + + int c = liq + (quad & 1) * 4; + int s = (quad / 2); + + byte_offset_ = (c + s * stride_) * sizeof(AccessType); + + pointer_= reinterpret_cast(ref.data()); + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { + + pointer_ += offset; + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { + + // Compute the offset in units of elements. Note, the external coordinate system is + // approximately transposed with respect to the tiled internal structure + int offset = + (tile_offset.contiguous() * InstructionShape::kContiguous) * stride_ + + (tile_offset.strided() * Shape::kStrided); + + add_pointer_offset(offset); + + byte_offset_ ^= (tile_offset.contiguous() & 1) * 4 * sizeof(AccessType); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator++() { + + pointer_ += stride_ * InstructionShape::kContiguous; + + byte_offset_ ^= 4 * sizeof(AccessType); + + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { + add_tile_offset(tile_offset); + + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. 
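+
+  // Note on the XOR terms above: sizeof(AccessType) is 16 bytes for 128b elements, so
+  // 4 * sizeof(AccessType) toggles a 64-byte step in byte_offset_. In add_tile_offset(), an odd
+  // advance in the contiguous direction flips that bit while an even advance leaves it
+  // unchanged; operator++() flips it unconditionally, so two consecutive increments cancel.
+  // Minimal sketch of the toggle (illustrative only):
+  //
+  //   Index byte_offset = 0;
+  //   byte_offset ^= 4 * 16;   // after one operator++   -> 64
+  //   byte_offset ^= 4 * 16;   // after a second         -> back to 0
+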
+ CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + + load_with_byte_offset(frag, 0); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset in units of bytes + Index byte_offset) const { + + AccessType *fetch_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < Policy::Iterations::kContiguous; ++c) { + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < Policy::Iterations::kStrided; ++s) { + + int access_idx = s + c * Policy::Iterations::kStrided; + + AccessType const *source_ptr = pointer_ + + Policy::Delta::kContiguous * c * stride_ + + Policy::Delta::kStrided * s; + + char const *source_byte_ptr = reinterpret_cast(source_ptr) + byte_offset + byte_offset_; + + AccessType const *source = reinterpret_cast(source_byte_ptr); + + fetch_ptr[access_idx] = *source; + } + } + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index pointer_offset) const { + + load_with_byte_offset(frag, pointer_offset * sizeof(Element)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset) const { + + load_with_byte_offset(frag, tile_offset, 0); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + + load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index byte_offset) const { + Index pointer_offset = + tile_offset.contiguous() * InstructionShape::kContiguous * stride_ + + tile_offset.strided() * Shape::kStrided; + + byte_offset += sizeof(AccessType) * pointer_offset; + + load_with_byte_offset(frag, byte_offset); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. 
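+
+  // In contrast to the congruous specialization, the tile-offset overload above treats the
+  // external coordinates as approximately transposed with respect to the internal arrangement:
+  // the contiguous tile index is scaled by InstructionShape::kContiguous * stride_ and the
+  // strided tile index by Shape::kStrided. Worked example with assumed values
+  // (InstructionShape::kContiguous = 4, Shape::kStrided = 16, stride_ = 32 accesses),
+  // illustrative only:
+  //
+  //   Index c = 1, s = 1;
+  //   Index pointer_offset = c * 4 * 32 + s * 16;   // = 128 + 16 = 144 accesses
+  //   Index byte_offset    = pointer_offset * 16;   // sizeof(AccessType) == 16 bytes
+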
+ CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + + } +}; + + +//////////////////////////////////////////////////////////////////////////////// +/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Identifies A or B multiplicand + Operand Operand_, + /// Data type of elements + typename Element_, + /// Shape of one matrix product operation (concept: MatrixShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions) + int OpDelta_, + /// Number of partitions along K dimension + int PartitionsK_> +class MmaTensorOpMultiplicandTileIterator< + Shape_, Operand_, Element_, + cutlass::layout::RowMajorTensorOpMultiplicandCrosswise128x4, + InstructionShape_, OpDelta_, 32, PartitionsK_> { + public: + + /// Shape of tile to load (concept: PitchLinearShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand_; + + static_assert(kOperand == Operand::kA || kOperand== Operand::kB, + "MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma."); + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise128x4; + + /// Shape of one matrix product operation (concept: MatrixShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) + static int const kOpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Long Index type + using StrideIndex = typename TensorRef::Layout::Stride::Index; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Underlying tile iterator implementation + using Base = MmaTensorOpMultiplicandTileIterator< + layout::PitchLinearShape, kOperand, Element, + layout::TensorOpMultiplicandCrosswise128x4, + layout::PitchLinearShape, + kOpDelta, kThreads, PartitionsK_>; + + public: + + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = typename Base::Fragment; + +private: + + /// Underlying tile iterator + Base iterator_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator() { } + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator( + TensorRef const &ref, + int lane_id + ): iterator_({ref.data(), ref.stride()}, lane_id) { + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { + + iterator_.add_pointer_offset(offset); + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { + + iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()}); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator & 
operator++() { + + ++iterator_; + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator & operator--() { + + --iterator_; + + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { + add_tile_offset(layout::PitchLinearCoord(tile_offset.column(), tile_offset.row())); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { + add_tile_offset(layout::PitchLinearCoord(-tile_offset.column(), -tile_offset.row())); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + + iterator_.load(frag); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index pointer_offset) const { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index byte_offset) const { + iterator_.load_with_byte_offset(frag, byte_offset); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset) const { + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index byte_offset) const { + iterator_.load_with_byte_offset( + frag, + {tile_offset.strided(), tile_offset.contiguous()}, + byte_offset); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. 
+ CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + iterator_.set_kgroup_index(k_group); + } +}; + + +//////////////////////////////////////////////////////////////////////////////// +/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Identifies A or B multiplicand + Operand Operand_, + /// Data type of elements + typename Element_, + /// Shape of one matrix product operation (concept: MatrixShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions) + int OpDelta_, + /// Number of partitions along K dimension + int PartitionsK_> +class MmaTensorOpMultiplicandTileIterator< + Shape_, Operand_, Element_, + cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise128x4, + InstructionShape_, OpDelta_, 32, PartitionsK_> { + public: + + /// Shape of tile to load (concept: PitchLinearShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand_; + + static_assert(kOperand == Operand::kA || kOperand== Operand::kB, + "MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma."); + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise128x4; + + /// Shape of one matrix product operation (concept: MatrixShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) + static int const kOpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Long Index type + using StrideIndex = typename TensorRef::Layout::Stride::Index; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Underlying tile iterator implementation + using Base = MmaTensorOpMultiplicandTileIterator< + layout::PitchLinearShape, kOperand, Element, + layout::TensorOpMultiplicandCrosswise128x4, + layout::PitchLinearShape, + kOpDelta, kThreads, PartitionsK_>; + + public: + + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = typename Base::Fragment; + +private: + + /// Underlying tile iterator + Base iterator_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator() { } + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator( + TensorRef const &ref, + int lane_id + ): iterator_({ref.data(), ref.stride()}, lane_id) { + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { + + iterator_.add_pointer_offset(offset); + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { + + iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()}); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + 
MmaTensorOpMultiplicandTileIterator & operator++() { + + ++iterator_; + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator & operator--() { + + --iterator_; + + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { + add_tile_offset(layout::PitchLinearCoord(tile_offset.row(), tile_offset.column())); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { + add_tile_offset(layout::PitchLinearCoord(-tile_offset.row(), -tile_offset.column())); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + + iterator_.load(frag); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index pointer_offset) const { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index byte_offset) const { + iterator_.load_with_byte_offset(frag, byte_offset); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset) const { + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index byte_offset) const { + iterator_.load_with_byte_offset( + frag, + {tile_offset.contiguous(), tile_offset.strided()}, + byte_offset); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. 
+ CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + iterator_.set_kgroup_index(k_group); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +///////////////////////////////////////////////////////////////////////////////////////////////// +// Congruous shared memory layout +// Warp-level iterators for complex*complex + complex => complex +// The underlying iterators are similar to that for MMA f64*f64 + f64 = f64 +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// This tile iterator is specialized for loading 128b vectors of 64b elements. +/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: PitchLinearShape) + typename Shape_, + /// Identifies A or B multiplicand + Operand Operand_, + /// Shape of one matrix product operation (concept: PitchLinearShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions) + int OpDelta_, + /// Number of partitions along K dimension + int PartitionsK_> +class MmaTensorOpMultiplicandTileIterator< + Shape_, Operand_, cutlass::complex, + cutlass::layout::TensorOpMultiplicandCongruous64b, + InstructionShape_, OpDelta_, 32, PartitionsK_> { + public: + + /// Shape of tile to load (concept: PitchLinearShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand_; + + static_assert(kOperand == Operand::kA || kOperand== Operand::kB, + "MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma."); + + static_assert(!(Shape::kContiguous % 16) && !(Shape::kStrided % 8), "Divisibility."); + + /// Element type + using Element = cutlass::complex; + + /// Layout of source tile + using Layout = cutlass::layout::TensorOpMultiplicandCongruous64b; + + /// Shape of one matrix product operation (concept: GemmShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) + static int const kOpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// Number of partitions along K dimension + static int const kPartitionsK = PartitionsK_; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Long Index type + using StrideIndex = typename TensorRef::Layout::Stride::Index; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Load two elements per access + static int const kElementsPerAccess = 2; + + /// Policy defining internal details of tile iterator + struct Policy { + + /// Shape of one access + using Delta = layout::PitchLinearShape<8, 4>; + + /// Number of iterations to load + using Iterations = layout::PitchLinearShape< + Shape::kContiguous / kElementsPerAccess / Delta::kContiguous, + InstructionShape::kStrided / Delta::kStrided + >; + + }; + +private: + + /// Not working on this feature at the moment. 
+ static_assert(kOpDelta == 1, + "Alternative arrangements not supported at present."); + + /// Pointer type used for accesses + using AccessType = AlignedArray; + + /// Internal counter used to jump to next K partition + int k_group_idx_; + +public: + + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = + Array; + +private: + + /// Layout object storing stride values + StrideIndex stride_; + + /// Shared memory base pointers - not advanced + AccessType const *pointer_; + + /// Byte offset incremented as iterator advances + Index byte_offset_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0) { } + + /// Constructor from TensorRef + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator( + TensorRef const &ref, + int lane_id + ): + stride_(ref.stride(0) / kElementsPerAccess), byte_offset_(0), + k_group_idx_(0) { + + int access_strided = lane_id / Policy::Delta::kContiguous; + int access_contiguous = (lane_id % Policy::Delta::kContiguous) ^ access_strided; + + pointer_= reinterpret_cast(ref.data()) + + access_contiguous + access_strided * stride_; + + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { + + byte_offset_ += offset * sizeof(Element); + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { + + int offset = + (tile_offset.strided() * InstructionShape::kStrided) * stride_ * kElementsPerAccess + + tile_offset.contiguous() * Shape::kContiguous; + + add_pointer_offset(offset); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator++() { + + add_tile_offset({0, 1}); + + return *this; + } + + /// Advances the iterator along the opposite of the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator & operator--() { + + add_tile_offset({0, -1}); + + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { + add_tile_offset(tile_offset); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { + add_tile_offset(-tile_offset); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. 
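+
+  // Lane mapping used by the constructor above: access_strided = lane_id / 8 and
+  // access_contiguous = (lane_id % 8) ^ access_strided, so each group of eight lanes covers the
+  // same eight contiguous slots in an XOR-permuted order. A minimal host-side sketch that
+  // enumerates the mapping (illustrative only):
+  //
+  //   for (int lane_id = 0; lane_id < 32; ++lane_id) {
+  //     int access_strided    = lane_id / 8;
+  //     int access_contiguous = (lane_id % 8) ^ access_strided;
+  //     // lanes 0..7  -> strided 0, contiguous 0,1,2,3,4,5,6,7
+  //     // lanes 8..15 -> strided 1, contiguous 1,0,3,2,5,4,7,6
+  //   }
+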
+ CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + + load_with_byte_offset(frag, 0); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset in units of bytes + Index byte_offset) const { + + AccessType *fetch_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < Policy::Iterations::kStrided; ++s) { + + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < Policy::Iterations::kContiguous; ++c) { + + int access_idx = c + s * Policy::Iterations::kContiguous; + + AccessType const *source_ptr = pointer_ + + Policy::Delta::kContiguous * c + + Policy::Delta::kStrided * s * stride_; + + char const *source_byte_ptr = reinterpret_cast(source_ptr) + byte_offset + byte_offset_; + + AccessType const *source = reinterpret_cast(source_byte_ptr); + + fetch_ptr[access_idx] = *source; + } + } + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index pointer_offset) const { + + load_with_byte_offset(frag, pointer_offset * sizeof(Element)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset) const { + + load_with_byte_offset(frag, tile_offset, 0); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + + load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index byte_offset) const { + + Index pointer_offset = + tile_offset.contiguous() * Shape::kContiguous / Layout::kElementsPerAccess + + tile_offset.strided() * InstructionShape::kStrided * stride_; + + byte_offset += sizeof(AccessType) * pointer_offset; + + load_with_byte_offset(frag, byte_offset); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. 
+ CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +///////////////////////////////////////////////////////////////////////////////////////////////// +// Crosswise shared memory layout +// Warp-level iterators for complex*complex + complex => complex +// The underlying iterators are similar to that for f64*f64 + f64 = f64 +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// This tile iterator is specialized for loading 128b vectors of 64b elements. +/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: PitchLinearShape) + typename Shape_, + /// Identifies A or B multiplicand + Operand Operand_, + /// Shape of one matrix product operation (concept: PitchLinearShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions) + int OpDelta_, + /// Number of partitions along K dimension + int PartitionsK_> +class MmaTensorOpMultiplicandTileIterator< + Shape_, Operand_, complex, + cutlass::layout::TensorOpMultiplicand64bCrosswise, + InstructionShape_, OpDelta_, 32, PartitionsK_> { + public: + + /// Shape of tile to load (concept: PitchLinearShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand_; + + static_assert(kOperand == Operand::kA || kOperand== Operand::kB, + "MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma."); + + static_assert(!(Shape::kContiguous % 4) && !(Shape::kStrided % 16), "Divisibility."); + + static_assert(sizeof_bits>::value == 64, "This is specialized for 64b accesses."); + + /// Element type + using Element = complex; + + /// Layout of source tile + using Layout = cutlass::layout::TensorOpMultiplicand64bCrosswise; + + /// Shape of one matrix product operation (concept: GemmShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) + static int const kOpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// Number of partitions along K dimension + static int const kPartitionsK = PartitionsK_; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Long Index type + using StrideIndex = typename TensorRef::Layout::Stride::Index; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Load two elements per access + static int const kElementsPerAccess = 2; + + /// Policy defining internal details of tile iterator + struct Policy { + + /// Shape of one access + using Delta = layout::PitchLinearShape<4, 16>; + + /// Number of iterations to load + using Iterations = layout::PitchLinearShape< + InstructionShape::kContiguous / Delta::kContiguous, + Shape::kStrided / Delta::kStrided + >; + + }; + +private: + + /// Not working on this feature at the moment. 
+ static_assert(kOpDelta == 1, + "Alternative arrangements not supported at present."); + + /// Pointer type used for accesses + using AccessType = AlignedArray; + +public: + + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = + Array; + +private: + + /// Layout object storing stride values + StrideIndex stride_; + + /// Shared memory base pointers - not advanced + AccessType const *pointer_; + + /// Byte offset incremented as iterator advances + Index byte_offset_; + + /// Internal counter for tracking K-group + Index k_group_idx_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0) { } + + /// Constructor from TensorRef + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator( + TensorRef const &ref, + int lane_id + ): + stride_(ref.stride(0) / kElementsPerAccess), byte_offset_(0), + k_group_idx_(0) { + + int access_strided = lane_id / 8; + int access_contiguous = (lane_id % 8); + + byte_offset_ = (access_contiguous + access_strided * stride_) * sizeof(AccessType); + + pointer_= reinterpret_cast(ref.data()); + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { + + pointer_ += offset / kElementsPerAccess; + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { + int offset = (tile_offset.contiguous() * InstructionShape::kContiguous) * + stride_ * kElementsPerAccess + + tile_offset.strided() * Shape::kStrided; + + add_pointer_offset(offset); + + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator &add_tile_offset_negative(TensorCoord const &tile_offset) { + + add_tile_offset(tile_offset); + + if (k_group_idx_ & 1) + byte_offset_ ^= 0x40; + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator++() { + + pointer_ += stride_ * InstructionShape::kContiguous; + + // xor ptr + byte_offset_ ^= 0x40; + + ++k_group_idx_; + + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { + add_tile_offset(tile_offset); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. 
+ CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + + load_with_byte_offset(frag, 0); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset in units of bytes + Index byte_offset) const { + + AccessType *fetch_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < Policy::Iterations::kContiguous; ++c) { + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < Policy::Iterations::kStrided; ++s) { + + int access_idx = c * Policy::Iterations::kStrided + s; + + AccessType const *source_ptr = pointer_ + + Policy::Delta::kContiguous * c * stride_ + + Policy::Delta::kStrided * s / kElementsPerAccess; + + char const *source_byte_ptr = reinterpret_cast(source_ptr) + byte_offset + byte_offset_; + + AccessType const *source = reinterpret_cast(source_byte_ptr); + + fetch_ptr[access_idx] = *source; + } + } + + Element *exchange_ptr = reinterpret_cast(&frag); + + // exchange on 64b granularity only for fragments held in k=8/2 to k=8 + CUTLASS_PRAGMA_UNROLL + for (int i = Fragment::kElements/2; i < Fragment::kElements; i += 2) { + Element tmp = exchange_ptr[i]; + exchange_ptr[i] = exchange_ptr[i + 1]; + exchange_ptr[i + 1] = tmp; + } + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index pointer_offset) const { + + load_with_byte_offset(frag, pointer_offset * sizeof(Element)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset) const { + + load_with_byte_offset(frag, tile_offset, 0); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + + load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index byte_offset) const { + Index pointer_offset = tile_offset.contiguous() * + InstructionShape::kContiguous / + Layout::kElementsPerAccess + + tile_offset.strided() * Shape::kStrided * stride_; + + byte_offset += sizeof(AccessType) * pointer_offset; + + load_with_byte_offset(frag, byte_offset); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. 
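Two details of the crosswise iterator above are easy to sanity-check on the host: operator++ toggles byte_offset_ with 0x40, alternating between two 64-byte-apart halves of the swizzled tile as the k-group advances, and load_with_byte_offset swaps adjacent elements in the upper half of the fragment after the vectorized fetches. A small sketch of both, assuming an 8-element fragment purely for illustration (not the CUTLASS API):

    #include <algorithm>
    #include <cstdio>

    int main() {
      // k-group advance: the byte offset alternates between 0x00 and 0x40,
      // mirroring 'byte_offset_ ^= 0x40' in operator++.
      unsigned byte_offset = 0;
      for (int k_group = 0; k_group < 4; ++k_group) {
        std::printf("k-group %d: byte_offset = 0x%02x\n", k_group, byte_offset);
        byte_offset ^= 0x40;
      }

      // Pairwise exchange of the second half of the fragment, mirroring the
      // "exchange on 64b granularity" loop in load_with_byte_offset.
      float frag[8] = {0, 1, 2, 3, 4, 5, 6, 7};     // stand-in for Fragment (assumed size 8)
      for (int i = 8 / 2; i < 8; i += 2)
        std::swap(frag[i], frag[i + 1]);
      for (float v : frag)
        std::printf("%.0f ", v);                    // prints: 0 1 2 3 5 4 7 6
      std::printf("\n");
      return 0;
    }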
+ CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + k_group_idx_ = k_group; + } +}; + +} // namespace warp +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_gaussian_complex_tensor_op.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_gaussian_complex_tensor_op.h new file mode 100644 index 0000000000000000000000000000000000000000..31a661f723fb6a40f7a42ffbb7a21a3364dbe948 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_gaussian_complex_tensor_op.h @@ -0,0 +1,642 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Templates implementing warp-level matrix multiply-accumulate operations targeting + Tensor Cores. 
+*/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/array.h" +#include "cutlass/complex.h" +#include "cutlass/numeric_types.h" +#include "cutlass/matrix_shape.h" + +#include "cutlass/arch/memory_sm75.h" +#include "cutlass/arch/mma_sm75.h" +#include "cutlass/arch/mma_sm80.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/warp/mma.h" + +#include "cutlass/gemm/warp/mma_tensor_op_policy.h" +#include "cutlass/gemm/warp/mma_tensor_op.h" + +#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h" +#include "cutlass/gemm/warp/mma_gaussian_complex_tensor_op_tile_iterator_sm80.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace warp { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + /// Data type of A elements + typename RealElementA, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA_, + /// Data type of B elements + typename RealElementB, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB_, + /// Element type of C matrix + typename RealElementC, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC_, + /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) + typename Policy_, + /// Complex transform on A operand + ComplexTransform TransformA = ComplexTransform::kNone, + /// Complex transform on B operand + ComplexTransform TransformB = ComplexTransform::kNone, + /// Do source operands need more than one elements + bool GeneralizedOperatorElements = false, + /// Used for partial specialization + typename Enable = bool +> +class MmaGaussianComplexTensorOp; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for complex*complex+complex => complex using real-valued TensorOps +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + /// Data type of A elements + typename RealElementA, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA_, + /// Data type of B elements + typename RealElementB, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB_, + /// Element type of C matrix + typename RealElementC, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC_, + /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) + typename Policy_, + /// Complex transform on A operand + ComplexTransform TransformA, + /// Complex transform on B operand + ComplexTransform TransformB +> +class MmaGaussianComplexTensorOp< + Shape_, + complex, + LayoutA_, + complex, + LayoutB_, + complex, + LayoutC_, + Policy_, + TransformA, + TransformB> { +public: + /// Shape of warp-level matrix operation (concept: GemmShape) + using Shape = Shape_; + + /// Data type of multiplicand A + using ElementA = complex; + + /// Layout of multiplicand A + using LayoutA = LayoutA_; + + /// Data type of multiplicand B + using ElementB = complex; + + /// Layout of multiplicand B + using LayoutB = LayoutB_; + + /// Data type of accumulator matrix C + using ElementC = complex; + + /// Layout of accumulator matrix C + using LayoutC = LayoutC_; + + /// Shape of the warp in units of thread (concept: MmaLanePolicySimt) + using Policy = Policy_; + + /// Underlying matrix multiply operator (concept: arch::Mma) + using ArchMmaOperator = 
typename Policy::Operator; + + /// Shape of underlying instruction + using InstructionShape = typename ArchMmaOperator::Shape; + + /// Underlying arch tag + using ArchTag = typename ArchMmaOperator::ArchTag; + + /// Indicates class of matrix operator + using OperatorClass = arch::OpClassTensorOp; + + /// Indicates math operator + using MathOperator = arch::OpMultiplyAddGaussianComplex; + + /// Complex transform on A operand + static ComplexTransform const kTransformA = TransformA; + + /// Complex transform on B operand + static ComplexTransform const kTransformB = TransformB; + + + /// Number of threads participating in warp-level matrix product + static int const kThreadCount = 32; + +public: + + /// Iterates over the A operand in memory + using IteratorA = MmaTensorOpMultiplicandTileIterator< + MatrixShape, + Operand::kA, + ElementA, + LayoutA, + MatrixShape, + Policy::OpDelta::kRow, + 32, + 1 + >; + + /// Storage for A tile + using FragmentA = typename IteratorA::Fragment; + + /// Storage for transformed A tile + using TransformedFragmentA = FragmentA; + + /// Iterates over the B operand in memory + using IteratorB = MmaTensorOpMultiplicandTileIterator< + MatrixShape, + Operand::kB, + ElementB, + LayoutB, + MatrixShape, + Policy::OpDelta::kColumn, + 32, + 1 + >; + + /// Storage for B tile + using FragmentB = typename IteratorB::Fragment; + + /// Storage for transformed B tile + using TransformedFragmentB = FragmentB; + + static_assert( + !(Shape::kM % ArchMmaOperator::Shape::kM) && + !(Shape::kN % ArchMmaOperator::Shape::kN), + "Shape of warp-level Mma must be divisible by operator shape."); + + /// Number of mma operations performed + using MmaIterations = MatrixShape< + Shape::kM / ArchMmaOperator::Shape::kM, + Shape::kN / ArchMmaOperator::Shape::kN + >; + + /// Iterates over the C operand in memory + using IteratorC = MmaTensorOpGaussianComplexAccumulatorTileIterator< + MatrixShape, + ElementC, + LayoutC, + typename ArchMmaOperator::Shape, + typename Policy::OpDelta>; + + /// Storage for C tile, the accumulator. Note, regardless of multiplicand type, this + /// storage arrangement is to be considered 'gaussian complex' in the sense that the accumulation is + /// done in three parts namely part1, part2, and part3. The parts 1, 2, and 3 are stored consecutively + /// in InteratorC::Frament. This matches the structure of Tensor Cores which are always real-valued matrix multiplies. + using FragmentC = typename IteratorC::Fragment; + + static_assert( + FragmentC::kElements == 3 * MmaIterations::kCount * ArchMmaOperator::FragmentC::kElements, + "Unexpected gaussian complex fragment length."); + +private: + + // + // Data members + // + + /// Underlying real-valued matrix multiply operator (concept: arch::Mma) + ArchMmaOperator mma; + +public: + + // + // Methods + // + + /// Ctor + CUTLASS_DEVICE + MmaGaussianComplexTensorOp() {} + + /// Performs a warp-level matrix multiply-accumulate operation + CUTLASS_DEVICE + void operator()( + FragmentC &D, + FragmentA const &A, + FragmentB const &B, + FragmentC const &C + ) const { + + // Alias types for underlying real-valued matrix multiply operator + using MmaOperandA = typename ArchMmaOperator::FragmentA; + using MmaOperandB = typename ArchMmaOperator::FragmentB; + using MmaOperandC = typename ArchMmaOperator::FragmentC; + + static_assert(MmaOperandA::kElements == 1, + "This implementation only supports math instructions in which exactly one element is needed for the A operand." 
+ "We can geneneralize later."); + + static_assert(MmaOperandB::kElements == 1, + "This implementation only supports math instructions in which exactly one element is needed for the B operand." + "We can geneneralize later."); + + D = C; + + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < MmaIterations::kRow; ++m) { + + // mma(accum.part1(), (a.real() + a.imag()), b.real(), accum.part1()); + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < MmaIterations::kColumn; ++n) { + + // Pack operands together. This may result in actual MOVs + MmaOperandA operand_Asum; + MmaOperandB operand_Br; + + operand_Asum[0] = A[m].real() + ((kTransformA == ComplexTransform::kConjugate) ? -A[m].imag() : +A[m].imag()); + operand_Br[0] = B[n].real(); + + // accumulator part1 + MmaOperandC *accum = reinterpret_cast(&D) + + (m + n * MmaIterations::kRow); + + mma(*accum, operand_Asum, operand_Br, *accum); + } + + // mma(accum.part2(), -a.real(), (b.real() - b.imag()), accum.part2()); + CUTLASS_PRAGMA_UNROLL + for (int n = MmaIterations::kColumn - 1; n >= 0; --n) { + + // Pack operands together. This may result in actual MOVs + MmaOperandA operand_Ar; + MmaOperandB operand_Bdiff; + + operand_Ar[0] = -A[m].real(); + operand_Bdiff[0] = B[n].real() - ((kTransformB == ComplexTransform::kConjugate) ? -B[n].imag() : +B[n].imag()); + + // accumulator part2 + MmaOperandC *accum = reinterpret_cast(&D) + + (m + n * MmaIterations::kRow) + MmaIterations::kCount; + + mma(*accum, operand_Ar, operand_Bdiff, *accum); + } + + // mma(accum.part3(), a.imag(), (b.real() + b.imag()), accum.part3()) + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < MmaIterations::kColumn; ++n) { + + // Pack operands together. This may result in actual MOVs + MmaOperandA operand_Ai; + MmaOperandB operand_Bsum; + + operand_Ai[0] = (kTransformA == ComplexTransform::kConjugate) ? -A[m].imag() : +A[m].imag(); + operand_Bsum[0] = B[n].real() + ((kTransformB == ComplexTransform::kConjugate) ? 
-B[n].imag() : +B[n].imag()); + + // accumulator part3 + MmaOperandC *accum = reinterpret_cast(&D) + + (m + n * MmaIterations::kRow) + 2 * MmaIterations::kCount; + + mma(*accum, operand_Ai, operand_Bsum, *accum); + } + } + } + + /// Transform the mma operands to the required types + CUTLASS_DEVICE + void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B, + FragmentA const &A, FragmentB const &B) const { + dst_A = A; + dst_B = B; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for complex*complex+complex => complex using real-valued TensorOps +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + /// Data type of A elements + typename RealElementA, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA_, + /// Data type of B elements + typename RealElementB, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB_, + /// Element type of C matrix + typename RealElementC, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC_, + /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) + typename Policy_, + /// Complex transform on A operand + ComplexTransform TransformA, + /// Complex transform on B operand + ComplexTransform TransformB +> +class MmaGaussianComplexTensorOp< + Shape_, + complex, + LayoutA_, + complex, + LayoutB_, + complex, + LayoutC_, + Policy_, + TransformA, + TransformB, + true> { +public: + /// Shape of warp-level matrix operation (concept: GemmShape) + using Shape = Shape_; + + /// Data type of multiplicand A + using ElementA = complex; + + /// Layout of multiplicand A + using LayoutA = LayoutA_; + + /// Data type of multiplicand B + using ElementB = complex; + + /// Layout of multiplicand B + using LayoutB = LayoutB_; + + /// Data type of accumulator matrix C + using ElementC = complex; + + /// Layout of accumulator matrix C + using LayoutC = LayoutC_; + + /// Shape of the warp in units of thread (concept: MmaLanePolicySimt) + using Policy = Policy_; + + /// Underlying matrix multiply operator (concept: arch::Mma) + using ArchMmaOperator = typename Policy::Operator; + + /// Shape of underlying instruction + using InstructionShape = typename ArchMmaOperator::Shape; + + /// Underlying arch tag + using ArchTag = typename ArchMmaOperator::ArchTag; + + /// Indicates class of matrix operator + using OperatorClass = arch::OpClassTensorOp; + + /// Indicates math operator + using MathOperator = arch::OpMultiplyAddGaussianComplex; + + /// Complex transform on A operand + static ComplexTransform const kTransformA = TransformA; + + /// Complex transform on B operand + static ComplexTransform const kTransformB = TransformB; + + + /// Number of threads participating in warp-level matrix product + static int const kThreadCount = 32; + +public: + + /// Iterates over the A operand in memory + using IteratorA = MmaTensorOpMultiplicandTileIterator< + MatrixShape, + Operand::kA, + ElementA, + LayoutA, + MatrixShape, + Policy::OpDelta::kRow, + 32, + 1 + >; + + /// Storage for A tile + using FragmentA = typename IteratorA::Fragment; + + /// Storage for transformed A tile + using TransformedFragmentA = FragmentA; + + /// Iterates over the B operand in memory + using IteratorB = MmaTensorOpMultiplicandTileIterator< + MatrixShape, + Operand::kB, + ElementB, + LayoutB, + MatrixShape, + Policy::OpDelta::kColumn, + 32, + 1 + >; + + /// Storage for B tile + using FragmentB = typename 
IteratorB::Fragment; + + /// Storage for transformed B tile + using TransformedFragmentB = FragmentB; + + static_assert( + !(Shape::kM % ArchMmaOperator::Shape::kM) && + !(Shape::kN % ArchMmaOperator::Shape::kN), + "Shape of warp-level Mma must be divisible by operator shape."); + + /// Number of mma operations performed + using MmaIterations = MatrixShape< + Shape::kM / ArchMmaOperator::Shape::kM, + Shape::kN / ArchMmaOperator::Shape::kN + >; + + /// Iterates over the C operand in memory + using IteratorC = MmaTensorOpGaussianComplexAccumulatorTileIterator< + MatrixShape, + ElementC, + LayoutC, + typename ArchMmaOperator::Shape, + typename Policy::OpDelta>; + + /// Storage for C tile, the accumulator. Note, regardless of multiplicand type, this + /// storage arrangement is to be considered 'gaussian complex' in the sense that the accumulation is + /// done in three parts namely part1, part2, and part3. The parts 1, 2, and 3 are stored consecutively + /// in InteratorC::Frament. This matches the structure of Tensor Cores which are always real-valued matrix multiplies. + using FragmentC = typename IteratorC::Fragment; + + static_assert( + FragmentC::kElements == 3 * MmaIterations::kCount * ArchMmaOperator::FragmentC::kElements, + "Unexpected gaussian complex fragment length."); + +private: + + // + // Data members + // + + /// Underlying real-valued matrix multiply operator (concept: arch::Mma) + ArchMmaOperator mma; + +public: + + // + // Methods + // + + /// Ctor + CUTLASS_DEVICE + MmaGaussianComplexTensorOp() {} + + /// Performs a warp-level matrix multiply-accumulate operation + CUTLASS_DEVICE + void operator()( + FragmentC &D, + FragmentA const &A, + FragmentB const &B, + FragmentC const &C + ) const { + + // Alias types for underlying real-valued matrix multiply operator + using MmaOperandA = typename ArchMmaOperator::FragmentA; + using MmaOperandB = typename ArchMmaOperator::FragmentB; + using MmaOperandC = typename ArchMmaOperator::FragmentC; + + D = C; + + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < MmaIterations::kRow; ++m) { + + // mma(accum.part1(), (a.real() + a.imag()), b.real(), accum.part1()); + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < MmaIterations::kColumn; ++n) { + + // Pack operands together. This may result in actual MOVs + MmaOperandA operand_Asum; + MmaOperandB operand_Br; + + CUTLASS_PRAGMA_UNROLL + for (int mk = 0; mk < MmaOperandA::kElements; ++mk) + operand_Asum[mk] = A[m*MmaOperandA::kElements + mk].real() + ((kTransformA == ComplexTransform::kConjugate) ? + -A[m*MmaOperandA::kElements + mk].imag() : +A[m*MmaOperandA::kElements + mk].imag()); + + CUTLASS_PRAGMA_UNROLL + for (int nk = 0; nk < MmaOperandB::kElements; ++nk) + operand_Br[nk] = B[n*MmaOperandB::kElements + nk].real(); + + // accumulator part1 + MmaOperandC *accum = reinterpret_cast(&D) + + (m + n * MmaIterations::kRow); + + mma(*accum, operand_Asum, operand_Br, *accum); + } + + // mma(accum.part2(), -a.real(), (b.real() - b.imag()), accum.part2()); + CUTLASS_PRAGMA_UNROLL + for (int n = MmaIterations::kColumn - 1; n >= 0; --n) { + + // Pack operands together. This may result in actual MOVs + MmaOperandA operand_Ar; + MmaOperandB operand_Bdiff; + + CUTLASS_PRAGMA_UNROLL + for (int mk = 0; mk < MmaOperandA::kElements; ++mk) + operand_Ar[mk] = -A[m*MmaOperandA::kElements + mk].real(); + + CUTLASS_PRAGMA_UNROLL + for (int nk = 0; nk < MmaOperandB::kElements; ++nk) + operand_Bdiff[nk] = B[n*MmaOperandB::kElements + nk].real() - ((kTransformB == ComplexTransform::kConjugate) ? 
+ -B[n*MmaOperandB::kElements + nk].imag() : +B[n*MmaOperandB::kElements + nk].imag()); + + // accumulator part2 + MmaOperandC *accum = reinterpret_cast(&D) + + (m + n * MmaIterations::kRow) + MmaIterations::kCount; + + mma(*accum, operand_Ar, operand_Bdiff, *accum); + } + + // mma(accum.part3(), a.imag(), (b.real() + b.imag()), accum.part3()) + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < MmaIterations::kColumn; ++n) { + + // Pack operands together. This may result in actual MOVs + MmaOperandA operand_Ai; + MmaOperandB operand_Bsum; + + CUTLASS_PRAGMA_UNROLL + for (int mk = 0; mk < MmaOperandA::kElements; ++mk) + operand_Ai[mk] = (kTransformA == ComplexTransform::kConjugate) ? + -A[m*MmaOperandA::kElements + mk].imag() : +A[m*MmaOperandA::kElements + mk].imag(); + + CUTLASS_PRAGMA_UNROLL + for (int nk = 0; nk < MmaOperandB::kElements; ++nk) + operand_Bsum[nk] = B[n*MmaOperandB::kElements + nk].real() + ((kTransformB == ComplexTransform::kConjugate) ? + -B[n*MmaOperandB::kElements + nk].imag() : +B[n*MmaOperandB::kElements + nk].imag()); + + // accumulator part3 + MmaOperandC *accum = reinterpret_cast(&D) + + (m + n * MmaIterations::kRow) + 2 * MmaIterations::kCount; + + mma(*accum, operand_Ai, operand_Bsum, *accum); + } + } + } + + /// Transform the mma operands to the required types + CUTLASS_DEVICE + void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B, + FragmentA const &A, FragmentB const &B) const { + dst_A = A; + dst_B = B; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace warp +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_gaussian_complex_tensor_op_tile_iterator_sm80.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_gaussian_complex_tensor_op_tile_iterator_sm80.h new file mode 100644 index 0000000000000000000000000000000000000000..1903622ee112e132e3d846416a059d02fc325418 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_gaussian_complex_tensor_op_tile_iterator_sm80.h @@ -0,0 +1,390 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Defines iterators used by warp-level matrix multiply operations targeting Tensor Cores. +*/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/array.h" +#include "cutlass/numeric_types.h" +#include "cutlass/tensor_ref.h" +#include "cutlass/matrix_shape.h" + +#include "cutlass/arch/memory_sm75.h" +#include "cutlass/gemm/gemm.h" + +#include "cutlass/layout/matrix.h" +#include "cutlass/layout/tensor.h" +#include "cutlass/layout/pitch_linear.h" +#include "cutlass/layout/tensor_op_multiplicand_sm80.h" +#include "cutlass/gemm/warp/mma_complex_tensor_op_tile_iterator_sm80.h" + +#include "cutlass/platform/platform.h" +#include "cutlass/fast_math.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace warp { + +///////////////////////////////////////////////////////////////////////////////////////////////// +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Element type + typename Element_, + /// Layout of operand in memory + typename Layout_, + /// Shape of one matrix product operation (concept: MatrixShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions, concept: MatrixShape) + typename OpDelta_> +class MmaTensorOpGaussianComplexAccumulatorTileIterator; + +//////////////////////////////////////////////////////////////////////////////// + +///////////////////////////////////////////////////////////////////////////////////////////////// +/// +/// Partial specialization for complex +/// +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Data type of underlying field of reals. 
+ typename RealElement, + /// Shape of one matrix product operation (concept: MatrixShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions, concept: MatrixShape) + typename OpDelta_> +class MmaTensorOpGaussianComplexAccumulatorTileIterator< + Shape_, complex, cutlass::layout::RowMajor, InstructionShape_, OpDelta_> { + public: + + /// Shape of tile to load (concept: MatrixShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand::kC; + + /// Element type + using Element = complex; + + /// Layout of source tile + using Layout = cutlass::layout::RowMajor; + + /// Shape of one matrix product operation (concept: MatrixShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) + using OpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Internal structure of iterator - made public to enable introspection + struct Policy { + static_assert( + !(Shape::kRow % InstructionShape::kM) && + !(Shape::kColumn % InstructionShape::kN), + "Shape of warp-level Mma must be divisible by operator shape."); + + static_assert(platform::is_same::value, + "Layouts must be defined for logical MatrixCoord coordinate space."); + + /// Number of mma operations performed + using MmaIterations = MatrixShape; + }; + +private: + + // Assume accumulator tile is an arrangement of 8-by-8 tiles replicated over the entire + // shape, with each quad mapped to one row and each thread mapped to 1/4 of the elements + // of that row. The accumulators within one row are assumed to be consecutive. + static int const kElementsPerAccess = InstructionShape::kN / 4; + static int const kRowsPerTile = 8; + static int const kAccumulatorRows = InstructionShape::kM / kRowsPerTile; + +public: + + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile. 
It is assumed that the accumulators + /// are stored in a gaussian complex arrangement with parts 1, 2, and 3 as entirely contiguous + /// arranged as [part1, part2, part3] + using Fragment = Array; + + static int const kPart1Index = (Shape::kCount / kThreads) * 0; + static int const kPart2Index = (Shape::kCount / kThreads) * 1; + static int const kPart3Index = (Shape::kCount / kThreads) * 2; + +private: + + /// Reference to output tensor + TensorRef ref_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaTensorOpGaussianComplexAccumulatorTileIterator() { } + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaTensorOpGaussianComplexAccumulatorTileIterator( + TensorRef const &ref, + int lane_id + ): + ref_(ref) { + + int quad = (lane_id >> 2); + int lane_in_quad = (lane_id & 3); + + MatrixCoord lane_offset(quad, lane_in_quad * kElementsPerAccess); + + ref_.add_coord_offset(lane_offset); + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_HOST_DEVICE + MmaTensorOpGaussianComplexAccumulatorTileIterator &add_pointer_offset(LongIndex offset) { + ref_.add_pointer_offset(offset); + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaTensorOpGaussianComplexAccumulatorTileIterator &add_tile_offset(TensorCoord const &tile_offset) { + + ref_.add_coord_offset(tile_offset * make_Coord(Shape::kRow, Shape::kColumn)); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpGaussianComplexAccumulatorTileIterator & operator++() { + // deliberate no-op + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpGaussianComplexAccumulatorTileIterator & operator--() { + // deliberate no-op + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpGaussianComplexAccumulatorTileIterator & operator+=(TensorCoord const &tile_offset) { + add_tile_offset(tile_offset); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpGaussianComplexAccumulatorTileIterator & operator-=(TensorCoord const &tile_offset) { + add_tile_offset(-tile_offset); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. 
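The accumulator iterator's constructor maps each lane to a starting coordinate: each quad owns one accumulator row, and the four lanes of a quad split that row into kElementsPerAccess-wide chunks. A host-side sketch of the mapping, assuming kElementsPerAccess == 2 (i.e. InstructionShape::kN == 8, an assumption made only for this illustration):

    #include <cstdio>

    int main() {
      const int kElementsPerAccess = 2;       // assumed InstructionShape::kN / 4
      for (int lane_id = 0; lane_id < 32; ++lane_id) {
        int quad         = lane_id >> 2;      // one quad per accumulator row
        int lane_in_quad = lane_id & 3;       // four lanes split the row
        int row = quad;
        int col = lane_in_quad * kElementsPerAccess;
        std::printf("lane %2d -> starting accumulator element (%d, %d)\n", lane_id, row, col);
      }
      return 0;
    }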
+ CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + load_with_pointer_offset(frag, 0); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + Fragment &frag, ///< fragment to load from the tensor + Index pointer_offset) const { ///< loads a tile with a linear offset + + TensorRef offset_ref(ref_); + offset_ref.add_pointer_offset(pointer_offset); + + CUTLASS_PRAGMA_UNROLL + for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) { + CUTLASS_PRAGMA_UNROLL + for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) { + + int mma_accum_start = kAccumulatorRows * kElementsPerAccess * + (mma_n * Policy::MmaIterations::kRow + mma_m); + + CUTLASS_PRAGMA_UNROLL + for (int row = 0; row < kAccumulatorRows; ++row) { + CUTLASS_PRAGMA_UNROLL + for (int col = 0; col < kElementsPerAccess; ++col) { + int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow + + row * kRowsPerTile; + int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col; + + Element z = offset_ref.at({accum_m, accum_n}); + + frag[mma_accum_start + row * kElementsPerAccess + col + kPart1Index] = z.real() + z.imag(); + frag[mma_accum_start + row * kElementsPerAccess + col + kPart2Index] = -z.real(); + frag[mma_accum_start + row * kElementsPerAccess + col + kPart3Index] = z.imag(); + } + } + } + } + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + Fragment &frag, ///< fragment to load from the tensor + Index byte_offset) const { ///< loads a tile with a linear offset + + load_with_pointer_offset(byte_offset / sizeof(Element)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + Fragment &frag, ///< fragment to load from the tensor + TensorCoord const &tile_offset) const { ///< loads a tile with a logical offset in units of whole tiles + + load(frag, tile_offset, 0); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. 
+ CUTLASS_DEVICE + void load( + Fragment &frag, ///< fragment to load from the tensor + TensorCoord const &tile_offset, ///< loads a tile with a logical offset in units of whole tiles + Index pointer_offset) const { ///< loads a tile with a logical offset AND a pointer offset + + load_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset); + } + + /// Stores a fragment to memory + CUTLASS_HOST_DEVICE + void store(Fragment const &frag) const { + store_with_pointer_offset(frag, 0); + } + + /// Stores a fragment to memory with additional pointer offset + CUTLASS_DEVICE + void store_with_pointer_offset( + Fragment const &frag, ///< fragment to store from the tensor + Index pointer_offset) const { ///< store a tile with a linear offset + + TensorRef offset_ref(ref_); + offset_ref.add_pointer_offset(pointer_offset); + + CUTLASS_PRAGMA_UNROLL + for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) { + CUTLASS_PRAGMA_UNROLL + for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) { + + int mma_accum_start = kAccumulatorRows * kElementsPerAccess * + (mma_n * Policy::MmaIterations::kRow + mma_m); + + CUTLASS_PRAGMA_UNROLL + for (int row = 0; row < kAccumulatorRows; ++row) { + CUTLASS_PRAGMA_UNROLL + for (int col = 0; col < kElementsPerAccess; ++col) { + int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow + + row * kRowsPerTile; + int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col; + int idx = mma_accum_start + row * kElementsPerAccess + col; + + Element z(frag[kPart1Index + idx] - frag[kPart3Index + idx], + frag[kPart1Index + idx] + frag[kPart2Index + idx]); + + offset_ref.at({accum_m, accum_n}) = z; + } + } + } + } + } + + /// Stores a fragment to memory with additional pointer offset + CUTLASS_DEVICE + void store_with_byte_offset( + Fragment const &frag, ///< fragment to store from the tensor + Index byte_offset) const { ///< store a tile with a linear offset + + store_with_pointer_offset(byte_offset / sizeof(Element)); + } + + /// Stores a fragment to memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void store( + Fragment &frag, ///< fragment to store to the tensor + TensorCoord const &tile_offset) const { ///< stores a tile with a logical offset in units of whole tiles + + store(frag, tile_offset, 0); + } + + /// Stores a fragment from memory with logical offset in units of whole tiles. 
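Putting the pieces together: MmaGaussianComplexTensorOp (earlier in this diff) accumulates three real partial products per output element, while this iterator seeds them on load (part1 = c.real + c.imag, part2 = -c.real, part3 = c.imag) and recombines them on store (real = part1 - part3, imag = part1 + part2). The scheme spends three real multiplies instead of four per complex multiply-accumulate. A scalar host-side check that the round trip computes c + a*b (no conjugation, illustration only, not CUTLASS API):

    #include <cassert>
    #include <cmath>
    #include <complex>
    #include <cstdio>

    int main() {
      std::complex<double> a(1.5, -2.0), b(0.5, 3.0), c(4.0, 1.0);

      // Seed the three partial accumulators from c, as load_with_pointer_offset does.
      double part1 = c.real() + c.imag();
      double part2 = -c.real();
      double part3 = c.imag();

      // Three real multiply-accumulates, as in MmaGaussianComplexTensorOp::operator().
      part1 += (a.real() + a.imag()) * b.real();
      part2 += (-a.real()) * (b.real() - b.imag());
      part3 += a.imag() * (b.real() + b.imag());

      // Recombine, as store_with_pointer_offset does.
      std::complex<double> d(part1 - part3, part1 + part2);

      std::complex<double> reference = c + a * b;
      assert(std::abs(d - reference) < 1e-12);
      std::printf("gaussian: (%f, %f)  reference: (%f, %f)\n",
                  d.real(), d.imag(), reference.real(), reference.imag());
      return 0;
    }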
+ CUTLASS_DEVICE + void store( + /// fragment to store to the tensor + Fragment const &frag, + /// stores a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// stores a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + store_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace warp +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_planar_complex.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_planar_complex.h new file mode 100644 index 0000000000000000000000000000000000000000..894efd7ab50dd40bf78c89b786a8dedcf64d3652 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_planar_complex.h @@ -0,0 +1,182 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Templates implementing warp-level matrix multiply-accumulate operations. 
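The "planar complex" convention used by this file stores the real and imaginary parts of a fragment as two separate arrays rather than interleaving complex values; the class below relies only on the .real and .imag members of ArrayPlanarComplex (defined in cutlass/array_planar_complex.h, which is not part of this excerpt). A conceptual sketch of that layout, not the actual CUTLASS definition:

    #include <cstdio>

    // Illustrative stand-in for a planar-complex fragment: two parallel arrays
    // instead of an array of complex values. Real CUTLASS code uses
    // cutlass::ArrayPlanarComplex<T, N>, whose exact definition is not shown here.
    template <typename T, int N>
    struct PlanarComplexFragment {
      T real[N];
      T imag[N];
    };

    int main() {
      PlanarComplexFragment<float, 4> frag{};
      for (int i = 0; i < 4; ++i) {
        frag.real[i] = float(i);        // all real parts are contiguous...
        frag.imag[i] = float(-i);       // ...and all imaginary parts follow separately
      }
      std::printf("real[2] = %f, imag[2] = %f\n", frag.real[2], frag.imag[2]);
      return 0;
    }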
+*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/array.h" +#include "cutlass/complex.h" +#include "cutlass/numeric_types.h" +#include "cutlass/matrix_shape.h" +#include "cutlass/gemm/gemm.h" + +#include "cutlass/array_planar_complex.h" +#include "cutlass/gemm/warp/tile_iterator_planar_complex.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace warp { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + /// Underlying real-valued warp-level matrix multiply + typename Operator_, + /// Transformation applied to A operand (typically folded into math instruction) + ComplexTransform TransformA = ComplexTransform::kNone, + /// Transformation applied to B operand (typically folded into math instruction) + ComplexTransform TransformB = ComplexTransform::kNone +> +class MmaPlanarComplex { +public: + + /// Underlying real-valued warp-level matrix multiply + using Operator = Operator_; + + /// Shape of warp-level matrix multipy + using Shape = typename Operator::Shape; + + /// Transformation applied to A operand (typically folded into math instruction) + static ComplexTransform const kTransformA = TransformA; + + /// Transformation applied to B operand (typically folded into math instruction) + static ComplexTransform const kTransformB = TransformB; + + /// Fragment of elements + using FragmentA = ArrayPlanarComplex; + + /// Iterator into planar complex + using IteratorA = TileIteratorPlanarComplex; + + /// Layout in memory of the A operand + using LayoutA = typename Operator::LayoutA; + + using FragmentB = ArrayPlanarComplex; + + /// Iterator into planar complex + using IteratorB = TileIteratorPlanarComplex; + + /// Layout in memory of the B operand + using LayoutB = typename Operator::LayoutB; + + /// Tile iterator for accumulator + using IteratorC = TileIteratorPlanarComplex; + + /// Accumulator fragment + using FragmentC = ArrayPlanarComplex; + + /// Layout of accumulator fragment in memory + using LayoutC = typename Operator::LayoutC; + +private: + + /// Number of mma operations performed + using MmaIterations = MatrixShape< + Operator::Shape::kM / Operator::Policy::Operator::Shape::kM, + Operator::Shape::kN / Operator::Policy::Operator::Shape::kN + >; + +public: + /// Ctor + CUTLASS_DEVICE + MmaPlanarComplex() {} + + /// Performs a warp-level matrix multiply-accumulate operation + CUTLASS_DEVICE + void operator()( + FragmentC &D, + FragmentA const &A_in, + FragmentB const &B_in, + FragmentC const &C) const { + + D.real = C.real; + D.imag = C.imag; + + // + // Transform fragments based on conjugate operations. 
+ // + + negate neg_A; + + FragmentA frag_A; + frag_A.real = A_in.real; + + if (kTransformA == ComplexTransform::kConjugate) { + frag_A.imag = neg_A(frag_A.imag); + } + else { + frag_A.imag = frag_A.imag; + } + + FragmentB frag_B; + frag_B.real = B_in.real; + + if (kTransformB == ComplexTransform::kConjugate) { + negate neg; + frag_B.imag = neg(frag_B.imag); + } + else { + frag_B.imag = frag_B.imag; + } + + // + // Accumulated real-valued matrix multiplies + // + + Operator real_mma; + + // D.i += A.i * B.r + real_mma(D.imag, frag_A.imag, frag_B.real, D.imag); + + // D.r += A.r * B.r + real_mma(D.real, frag_A.real, frag_B.real, D.real); + + // D.i += A.r * B.i + real_mma(D.imag, frag_A.real, frag_B.imag, D.imag); + + // D.r += -A.i * B.i + frag_A.imag = neg_A(frag_A.imag); + real_mma(D.real, frag_A.imag, frag_B.imag, D.real); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace warp +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_simt.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_simt.h new file mode 100644 index 0000000000000000000000000000000000000000..ecde134a9d06c19880860bed83e436025618b04b --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_simt.h @@ -0,0 +1,263 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Templates implementing warp-level matrix multiply-accumulate operations. 
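Before the SIMT specialization that follows, the planar-complex warp operator just shown can be verified on scalars: it applies any conjugation by negating an operand's imaginary part up front, then issues four real multiply-accumulates. A minimal host-side check (illustration only, not the CUTLASS API):

    #include <cassert>
    #include <cmath>
    #include <complex>
    #include <cstdio>

    // Scalar model of MmaPlanarComplex::operator(): four real multiply-accumulates
    // on separately stored real/imaginary parts.
    int main() {
      std::complex<double> a(2.0, -1.0), b(-3.0, 0.5), c(1.0, 4.0);
      const bool conjugate_a = true;         // models ComplexTransform::kConjugate on A

      double a_r = a.real(), a_i = conjugate_a ? -a.imag() : a.imag();
      double b_r = b.real(), b_i = b.imag();
      double d_r = c.real(), d_i = c.imag();

      d_i += a_i * b_r;                      // D.i += A.i * B.r
      d_r += a_r * b_r;                      // D.r += A.r * B.r
      d_i += a_r * b_i;                      // D.i += A.r * B.i
      d_r += -a_i * b_i;                     // D.r += -A.i * B.i

      std::complex<double> reference = c + std::conj(a) * b;
      assert(std::abs(std::complex<double>(d_r, d_i) - reference) < 1e-12);
      std::printf("planar: (%f, %f)\n", d_r, d_i);
      return 0;
    }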
+*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/array.h" +#include "cutlass/numeric_types.h" +#include "cutlass/matrix_shape.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/warp/mma.h" + +#include "cutlass/gemm/thread/mma.h" + +#include "cutlass/gemm/warp/mma_simt_tile_iterator.h" +#include "cutlass/gemm/warp/mma_simt_policy.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace warp { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions. +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + /// Data type of A elements + typename ElementA_, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA_, + /// Data type of B elements + typename ElementB_, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB_, + /// Element type of C matrix + typename ElementC_, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC_, + /// Shape of the warp in units of thread (concept: MmaSimtPolicy) + typename Policy_, + /// Number of partitions along K dimension + int PartitionsK = 1, + /// Complex transformation on operand A + ComplexTransform TransformA = ComplexTransform::kNone, + /// Complex transformation on operand B + ComplexTransform TransformB = ComplexTransform::kNone, + /// Used for partial specialization + typename Enable = bool +> +class MmaSimt { +public: + /// Shape of warp-level matrix operation (concept: GemmShape) + using Shape = Shape_; + + /// Data type of multiplicand A + using ElementA = ElementA_; + + /// Layout of multiplicand A + using LayoutA = LayoutA_; + + /// Data type of multiplicand B + using ElementB = ElementB_; + + /// Layout of multiplicand B + using LayoutB = LayoutB_; + + /// Data type of accumulator matrix C + using ElementC = ElementC_; + + /// Layout of accumulator matrix C + using LayoutC = LayoutC_; + + /// Shape of the warp in units of thread (concept: MmaLanePolicySimt) + using Policy = Policy_; + + /// Indicates class of matrix operator + using OperatorClass = arch::OpClassSimt; + + /// Hard-coded for now + using ArchTag = arch::Sm50; + + /// Complex transform on A operand + static ComplexTransform const kTransformA = TransformA; + + /// Complex transform on B operand + static ComplexTransform const kTransformB = TransformB; + + /// Layout of threads + using ThreadLayoutA = typename platform::conditional< platform::is_same< layout::ColumnMajorInterleaved<4>, LayoutA >::value, + layout::ColumnMajor, + typename platform::conditional < platform::is_same< layout::RowMajorInterleaved<4>, LayoutA >::value, + layout::RowMajor, + LayoutA>::type + >::type; + + using ThreadLayoutB = typename platform::conditional< platform::is_same< layout::ColumnMajorInterleaved<4>, LayoutB >::value, + layout::ColumnMajor, + typename platform::conditional < platform::is_same< layout::RowMajorInterleaved<4>, LayoutB >::value, + layout::RowMajor, + LayoutB>::type + >::type; + + static constexpr bool use_dp4a = (platform::is_same< layout::ColumnMajorInterleaved<4>, LayoutA>::value || + platform::is_same< layout::RowMajorInterleaved<4>, LayoutA >::value) && + platform::is_same< ElementA, int8_t >::value && + platform::is_same< ElementB, int8_t >::value; + + using dp4a_type = typename platform::conditional< use_dp4a , int8_t, bool >::type; + + /// 
Thread-level matrix multiply accumulate operator + using ThreadMma = thread::Mma< + GemmShape< + Shape::kM / Policy::WarpShape::kRow, + Shape::kN / Policy::WarpShape::kColumn, + Policy::LaneMmaShape::kK>, + ElementA, + ThreadLayoutA, + ElementB, + ThreadLayoutB, + ElementC, + LayoutC, + arch::OpMultiplyAdd, + dp4a_type + >; + + /// Underlying matrix multiply operator (concept: arch::Mma) + using ArchMmaOperator = typename ThreadMma::ArchMmaOperator; + + /// Indicates math operator + using MathOperator = typename ArchMmaOperator::Operator; + + /// Shape of the underlying instruction + using InstructionShape = GemmShape<1,1,use_dp4a ? 4 : 1>; + +public: + + /// Iterates over the A operand in memory + using IteratorA = MmaSimtTileIterator< + MatrixShape, + Operand::kA, + ElementA, + LayoutA, + Policy, + PartitionsK, + Shape::kK + >; + + /// Storage for A tile + using FragmentA = typename IteratorA::Fragment; + + /// Storage for transformed A tile + using TransformedFragmentA = FragmentA; + + /// Iterates over the B operand in memory + using IteratorB = MmaSimtTileIterator< + MatrixShape, + Operand::kB, + ElementB, + LayoutB, + Policy, + PartitionsK, + Shape::kK + >; + + /// Storage for B tile + using FragmentB = typename IteratorB::Fragment; + + /// Storage for transformed A tile + using TransformedFragmentB = FragmentB; + + /// Iterates over the C operand in memory + using IteratorC = MmaSimtTileIterator< + MatrixShape, + Operand::kC, + ElementC, + LayoutC, + Policy + >; + + /// Storage for C tile + using FragmentC = typename ThreadMma::FragmentC; + +public: + + // + // Methods + // + + /// Ctor + CUTLASS_DEVICE + MmaSimt() {} + + /// Performs a warp-level matrix multiply-accumulate operation + CUTLASS_DEVICE + void operator()( + FragmentC &d, + FragmentA a, + FragmentB b, + FragmentC const &c, int group_idx = 0) const { + + ThreadMma mma; + + if (kTransformA == ComplexTransform::kConjugate) { + conjugate conj_a; + a = conj_a(a); + } + + if (kTransformB == ComplexTransform::kConjugate) { + conjugate conj_b; + b = conj_b(b); + } + + mma(d, a, b, c); + } + + /// Transform the mma operands to the required types + CUTLASS_DEVICE + void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B, + FragmentA const &A, FragmentB const &B) const { + dst_A = A; + dst_B = B; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace warp +} // namespace gemm +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_simt_policy.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_simt_policy.h new file mode 100644 index 0000000000000000000000000000000000000000..a0b0a7500b30af15530fbed10b79c477a0f983ac --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_simt_policy.h @@ -0,0 +1,69 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Describes the lane policy used by warp-level matrix multiply operators targeting SIMT + instructions +*/ + +#pragma once + +#include "cutlass/cutlass.h" + +namespace cutlass { +namespace gemm { +namespace warp { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Describes the arrangement and configuration of per-lane operations in warp-level matrix multiply +template < + typename WarpShape_, ///< shape of the warp in lanes (concept: MatrixShape) + typename LaneLayout_, ///< layout function of lanes + typename LaneMmaShape_ ///< size of each lane's thread-level matrix product (concept: GemmShape) +> +struct MmaSimtPolicy { + using WarpShape = WarpShape_; + using LaneLayout = LaneLayout_; + using LaneMmaShape = LaneMmaShape_; + using MmaShape = LaneMmaShape; + + /// Returns a layout functor mapping lane position in the warp to thread ID + CUTLASS_HOST_DEVICE + static LaneLayout get_lane_layout() { + return LaneLayout::packed({WarpShape::kRow, WarpShape::kColumn}); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace warp +} // namespace gemm +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_simt_tile_iterator.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_simt_tile_iterator.h new file mode 100644 index 0000000000000000000000000000000000000000..53c1c36566bc30f9ce642cbb2e8983162ee34092 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_simt_tile_iterator.h @@ -0,0 +1,1890 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Describes the lane policy used by warp-level matrix multiply operators targeting SIMT + instructions +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/array.h" +#include "cutlass/tensor_ref.h" +#include "cutlass/matrix_shape.h" + +#include "cutlass/arch/memory_sm75.h" + +#include "cutlass/layout/matrix.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/warp/mma_simt_policy.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace warp { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Iterates over operands to warp-level matrix multiply operations targeting SIMT instructions +/// +/// concept: MutableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Operand identity + Operand Operand, + /// Data type of A elements + typename Element_, + /// Layout of operand + typename Layout_, + /// Shape of the warp in units of thread (concept: MmaSimtPolicy) + typename Policy_, + /// Number of partitions along K dimension - used in sliced-K + int PartitionsK = 1, + /// Group Size along kPartition - used in sliced-K + int PartitionGroupSize = 1 +> +class MmaSimtTileIterator; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Specialization for A operands of column-major layouts +/// +/// Concept: MutableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Data type of A elements + typename Element_, + /// Shape of the warp in units of thread (concept: MmaSimtPolicy) + typename Policy_, + /// Number of partitions along K dimension - used in sliced-K + int PartitionsK, + /// Group Size along kPartition - used in sliced-K + int PartitionGroupSize +> +class MmaSimtTileIterator { +public: + + /// Shape of tile to load (concept: MatrixShape) 
+ using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand::kA; + + /// Element type + using Element = Element_; + + /// Layout of policy + using Layout = layout::ColumnMajor; + + /// Decomposition of elements among threads + using Policy = Policy_; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + // + // Derived quantities + // + + static_assert(!(Shape::kRow % Policy::WarpShape::kRow), + "The warp-level GEMM M size must be divisible by the number of threads arranged along the M dimension."); + + static_assert(Shape::kRow > 0, "Shape::kRow must be greater than zero."); + static_assert(Shape::kColumn > 0, "Shape::kColumn must be greater than zero."); + static_assert(Policy::WarpShape::kRow > 0, "Policy::WarpShape::kRow must be greater than zero."); + static_assert(Shape::kRow / Policy::WarpShape::kRow > 0, "Shape::kRow / Policy::WarpShape::kRow must be greater than zero."); + + /// Thread-level shape of a fragment + using ThreadShape = MatrixShape< + Shape::kRow / Policy::WarpShape::kRow, + Shape::kColumn + >; + + static_assert(!(ThreadShape::kRow % Policy::LaneMmaShape::kM), + "Thread-level GEMM must be divisible by Policy::LaneMmaShape."); + + /// Number of individual loads + using Iterations = MatrixShape< + ThreadShape::kRow / Policy::LaneMmaShape::kM, + ThreadShape::kColumn + >; + + /// Fragment object holding a thread's part of a tile + using Fragment = Array; + +private: + + /// Internal reference + cutlass::TensorRef, layout::ColumnMajor> ref_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaSimtTileIterator() { } + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaSimtTileIterator( + TensorRef ref, + int lane_id + ) { + + // compute offset based on thread ID and lane layout + typename Policy::LaneLayout lane_layout = Policy::get_lane_layout(); + + MatrixCoord lane_offset = lane_layout.inverse(lane_id) * + MatrixCoord(Policy::LaneMmaShape::kM, 0); + + ref.add_coord_offset(lane_offset); + + ref_.reset( + reinterpret_cast *>(ref.data()), + ref.stride(0) / Policy::LaneMmaShape::kM); + } + + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_HOST_DEVICE + MmaSimtTileIterator &add_pointer_offset(LongIndex offset) { + ref_.add_pointer_offset(offset); + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaSimtTileIterator &add_tile_offset(TensorCoord const &coord) { + + ref_.add_coord_offset({ + coord.row() * Shape::kRow / Policy::LaneMmaShape::kM, + coord.column() * Shape::kColumn}); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaSimtTileIterator & operator++() { + + ref_.add_coord_offset({0, Shape::kColumn}); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaSimtTileIterator & operator--() { + + ref_.add_coord_offset({0, -Shape::kColumn}); + + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. 
(vector loads) + CUTLASS_HOST_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const { + Array *dst_ptr = + reinterpret_cast *>(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int k = 0; k < Iterations::kColumn; ++k) { + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < Iterations::kRow; ++m) { + + // This logic has been replaced with calls to inline PTX to guarantee vectorization. + #if 0 + dst_ptr[m + k * Iterations::kRow] = + *(ref_.data() + ref_.offset({m * Policy::WarpShape::kRow, k}) + pointer_offset / Policy::LaneMmaShape::kM); + #endif + + auto ptr = ref_.data() + ref_.offset({m * Policy::WarpShape::kRow, k}) + pointer_offset / Policy::LaneMmaShape::kM; + arch::shared_load(dst_ptr[m + k * Iterations::kRow], ptr); + } + } + } + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + load_with_pointer_offset(frag, 0); + } + + /// Stores a fragment to memory at the location pointed to by the iterator + CUTLASS_HOST_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) const { + + Array const *src_ptr = + reinterpret_cast *>(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int k = 0; k < Iterations::kN; ++k) { + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < Iterations::kM; ++m) { + *(ref_.data() + ref_.offset(m * Policy::WarpShape::kM, k) + pointer_offset / Policy::LaneMmaShape::kM) = + src_ptr[m + k * Iterations::kM]; + } + } + } + + /// Stores a fragment to memory at the location pointed to by the iterator + CUTLASS_HOST_DEVICE + void store(Fragment const &frag) const { + store_with_pointer_offset(frag, 0); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. 
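// Editorial aside (not part of the vendored file): the constructors above derive each
// lane's starting coordinate entirely from Policy::get_lane_layout(). A minimal sketch of
// that mapping follows, assuming the common 4x8 lane arrangement with a
// RowMajorInterleaved<2> lane layout and a 4x4x1 per-lane MMA shape; these concrete
// values are illustrative assumptions, not taken from this diff.
#include "cutlass/cutlass.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma_simt_policy.h"

using LanePolicy = cutlass::gemm::warp::MmaSimtPolicy<
    cutlass::MatrixShape<4, 8>,                // 32 lanes arranged as 4 rows x 8 columns
    cutlass::layout::RowMajorInterleaved<2>,   // functor mapping lane id <-> (row, column)
    cutlass::gemm::GemmShape<4, 4, 1>>;        // per-lane thread-level MMA shape

// Mirrors what the A-operand iterator constructor above does with lane_id.
CUTLASS_HOST_DEVICE
cutlass::MatrixCoord lane_origin_A(int lane_id) {
  LanePolicy::LaneLayout lane_layout = LanePolicy::get_lane_layout();
  cutlass::MatrixCoord lane_coord = lane_layout.inverse(lane_id);   // this lane's (row, column)
  // A iterators scale the row by LaneMmaShape::kM elements; the column offset starts at 0.
  return lane_coord * cutlass::MatrixCoord(LanePolicy::LaneMmaShape::kM, 0);
}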
+ CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + // no operation here + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Specialization for A operands of row-major layouts +/// +/// Concept: MutableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Data type of A elements + typename Element_, + /// Shape of the warp in units of thread (concept: MmaSimtPolicy) + typename Policy_, + /// Number of partitions along K dimension - used in sliced-K + int PartitionsK, + /// Group Size along kPartition - used in sliced-K + int PartitionGroupSize +> +class MmaSimtTileIterator { +public: + + /// Shape of tile to load (concept: MatrixShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand::kA; + + /// Element type + using Element = Element_; + + /// Layout of policy + using Layout = layout::RowMajor; + + /// Decomposition of elements among threads + using Policy = Policy_; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + // + // Derived quantities + // + + static_assert(!(Shape::kRow % Policy::WarpShape::kRow), + "The warp-level GEMM M size must be divisible by the number of threads arranged along the M dimension."); + + static_assert(Shape::kRow > 0, "Shape::kRow must be greater than zero."); + static_assert(Shape::kColumn > 0, "Shape::kColumn must be greater than zero."); + static_assert(Policy::WarpShape::kRow > 0, "Policy::WarpShape::kRow must be greater than zero."); + static_assert(Shape::kRow / Policy::WarpShape::kRow > 0, "Shape::kRow / Policy::WarpShape::kRow must be greater than zero."); + + /// Thread-level shape of a fragment + using ThreadShape = MatrixShape< + Shape::kRow / Policy::WarpShape::kRow, + Shape::kColumn + >; + + static_assert(!(ThreadShape::kRow % Policy::LaneMmaShape::kM), + "Thread-level GEMM must be divisible by Policy::LaneMmaShape."); + + /// Number of individual loads (scalar loads) + using Iterations = MatrixShape< + ThreadShape::kRow / Policy::LaneMmaShape::kM, + ThreadShape::kColumn + >; + + /// Fragment object holding a thread's part of a tile + using Fragment = Array; + +private: + + /// Internal reference + cutlass::TensorRef ref_; + + /// Extent of tensor + MatrixCoord extent_; + + /// Origin + MatrixCoord origin_; + + /// Used to conditionally enable extents checking + bool divisible_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaSimtTileIterator() : divisible_(true) { } + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaSimtTileIterator( + TensorRef ref, + int lane_id + ) : extent_(Shape::kRow, Shape::kColumn), divisible_ (true) { + + // compute offset based on thread ID and lane layout + typename Policy::LaneLayout lane_layout = Policy::get_lane_layout(); + + MatrixCoord lane_offset = lane_layout.inverse(lane_id) * + MatrixCoord(Policy::LaneMmaShape::kM, 0); + + origin_ = lane_offset; + + ref.add_coord_offset(lane_offset); + + ref_.reset(ref.data(), ref.stride(0)); + + } + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaSimtTileIterator( + TensorRef ref, + TensorCoord extent, + int lane_id + ) : extent_(extent), divisible_ 
(false) { + + // compute offset based on thread ID and lane layout + typename Policy::LaneLayout lane_layout = Policy::get_lane_layout(); + + MatrixCoord lane_offset = lane_layout.inverse(lane_id) * + MatrixCoord(Policy::LaneMmaShape::kM, 0); + + origin_ = lane_offset; + + ref.add_coord_offset(lane_offset); + + ref_.reset(ref.data(), ref.stride(0)); + + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_HOST_DEVICE + MmaSimtTileIterator &add_pointer_offset(LongIndex offset) { + ref_.add_pointer_offset(offset); + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaSimtTileIterator &add_tile_offset(TensorCoord const &coord) { + + TensorCoord coord_offset( + coord.row() * Shape::kRow, + coord.column() * Shape::kColumn); + + origin_ += coord_offset; + + ref_.add_coord_offset(coord_offset); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaSimtTileIterator & operator++() { + + ref_.add_coord_offset({0, Shape::kColumn}); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaSimtTileIterator & operator--() { + + ref_.add_coord_offset({0, -Shape::kColumn}); + + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. (scalar loads) + CUTLASS_HOST_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const { + + CUTLASS_PRAGMA_UNROLL + for (int k = 0; k < Iterations::kColumn; ++k) { + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < Iterations::kRow; ++m) { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < Policy::LaneMmaShape::kM; i++) { + + MatrixCoord offset(m * Policy::WarpShape::kRow * Policy::LaneMmaShape::kM + i, k); + + MatrixCoord access_coord = origin_ + offset; + + int frag_idx = m * Policy::LaneMmaShape::kM + i + k * Iterations::kRow; + + if (divisible_ || + (access_coord.row() < extent_.row() && access_coord.column() < extent_.column())) { + + frag[frag_idx] = *(ref_.data() + ref_.offset(offset) + pointer_offset); + } + else { + frag[frag_idx] = Element(); + } + } + } + } + } + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + load_with_pointer_offset(frag, 0); + } + + /// Stores a fragment to memory at the location pointed to by the iterator + CUTLASS_HOST_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) const { + + CUTLASS_PRAGMA_UNROLL + for (int k = 0; k < Iterations::kColumn; ++k) { + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < Iterations::kRow; ++m) { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < Policy::LaneMmaShape::kM; i++) { + + *(ref_.data() + ref_.offset(m * Policy::WarpShape::kM * Policy::LaneMmaShape::kM + i, k) + pointer_offset) = + frag[m * Policy::LaneMmaShape::kM + i + k * Iterations::kM]; + } + } + } + } + + /// Stores a fragment to memory at the location pointed to by the iterator + CUTLASS_HOST_DEVICE + void store(Fragment const &frag) const { + store_with_pointer_offset(frag, 0); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. 
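// Editorial aside (not part of the vendored file): the second constructor above, which
// takes an explicit extent, is what enables the predicated path through divisible_ and
// extent_ -- loads that fall outside the extent return zero-initialized elements instead
// of reading past the tile. A hedged sketch of using it for a residual tile; the tile
// shape, element type, and helper name load_residual_tile are illustrative assumptions.
#include "cutlass/gemm/warp/mma_simt_tile_iterator.h"
#include "cutlass/gemm/warp/mma_simt_policy.h"
#include "cutlass/layout/matrix.h"

using LanePolicy = cutlass::gemm::warp::MmaSimtPolicy<
    cutlass::MatrixShape<4, 8>, cutlass::layout::RowMajorInterleaved<2>,
    cutlass::gemm::GemmShape<4, 4, 1>>;

// Row-major A tile of 32 x 8 elements per warp (illustrative shape).
using IterA = cutlass::gemm::warp::MmaSimtTileIterator<
    cutlass::MatrixShape<32, 8>, cutlass::gemm::Operand::kA,
    float, cutlass::layout::RowMajor, LanePolicy>;

__device__ void load_residual_tile(float *smem_A, int lda,
                                   IterA::Fragment &frag, int lane_id) {
  IterA::TensorRef ref_A(smem_A, lda);
  // Only 30 of the 32 logical rows hold valid data in this residual tile.
  IterA iter(ref_A, cutlass::MatrixCoord(30, 8), lane_id);
  iter.load(frag);   // accesses at or beyond row 30 yield Element() (zero) instead of a read
}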
+ CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + // no operation here + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Specialization for B operands of row-major layouts +/// +/// Concept: MutableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Data type of A elements + typename Element_, + /// Shape of the warp in units of thread (concept: MmaSimtPolicy) + typename Policy_, + /// Number of partitions along K dimension + int PartitionsK, + /// Group Size along kPartition - used in sliced-K + int PartitionGroupSize +> +class MmaSimtTileIterator { +public: + + /// Shape of tile to load (concept: MatrixShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand::kB; + + /// Element type + using Element = Element_; + + /// Layout of policy + using Layout = layout::RowMajor; + + /// Decomposition of elements among threads + using Policy = Policy_; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + // + // Derived quantities + // + + static_assert(!(Shape::kColumn % Policy::WarpShape::kColumn), + "The warp-level GEMM N size must be divisible by the number of threads arranged along the N dimension."); + + static_assert(Shape::kRow > 0, "Shape::kRow must be greater than zero."); + static_assert(Shape::kColumn > 0, "Shape::kColumn must be greater than zero."); + static_assert(Policy::WarpShape::kColumn > 0, "Policy::WarpShape::kColumn must be greater than zero."); + static_assert(Shape::kColumn / Policy::WarpShape::kColumn > 0, "Shape::kColumn / Policy::WarpShape::kColumn must be greater than zero."); + + /// Thread-level shape of a fragment + using ThreadShape = MatrixShape< + Shape::kRow, + Shape::kColumn / Policy::WarpShape::kColumn + >; + + static_assert(!(ThreadShape::kColumn % Policy::LaneMmaShape::kN), + "Thread-level GEMM must be divisible by Policy::LaneMmaShape."); + + /// Number of individual loads + using Iterations = MatrixShape< + ThreadShape::kRow, + ThreadShape::kColumn / Policy::LaneMmaShape::kN + >; + + /// Fragment object holding a thread's part of a tile + using Fragment = Array; + +protected: + + /// Internal reference + cutlass::TensorRef, layout::RowMajor> ref_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaSimtTileIterator() { } + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaSimtTileIterator( + TensorRef ref, + int lane_id + ) { + + // compute offset based on thread ID and lane layout + typename Policy::LaneLayout lane_layout = Policy::get_lane_layout(); + + MatrixCoord lane_offset = lane_layout.inverse(lane_id) * + MatrixCoord(0, Policy::LaneMmaShape::kN); + + ref.add_coord_offset(lane_offset); + + ref_.reset( + reinterpret_cast *>(ref.data()), + ref.stride(0) / Policy::LaneMmaShape::kN); + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_HOST_DEVICE + MmaSimtTileIterator &add_pointer_offset(LongIndex offset) { + ref_.add_pointer_offset(offset); + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaSimtTileIterator 
&add_tile_offset(TensorCoord const &coord) { + + ref_.add_coord_offset({ + coord.row() * Shape::kRow, + coord.column() * Shape::kColumn / Policy::LaneMmaShape::kN}); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaSimtTileIterator & operator++() { + + ref_.add_coord_offset({Shape::kRow, 0}); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaSimtTileIterator & operator--() { + + ref_.add_coord_offset({-Shape::kRow, 0}); + + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. (vector loads) + CUTLASS_HOST_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const { + + Array *dst_ptr = + reinterpret_cast *>(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int k = 0; k < Iterations::kRow; ++k) { + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < Iterations::kColumn; ++n) { + + #if 0 + dst_ptr[n + k * Iterations::kColumn] = + *(ref_.data() + ref_.offset({k, n * Policy::WarpShape::kColumn}) + pointer_offset / Policy::LaneMmaShape::kN); + #endif + + void const *ptr = ref_.data() + ref_.offset({k, n * Policy::WarpShape::kColumn}) + pointer_offset / Policy::LaneMmaShape::kN; + arch::shared_load(dst_ptr[n + k * Iterations::kColumn], ptr); + } + } + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + load_with_pointer_offset(frag, 0); + } + + /// Stores a fragment to memory at the location pointed to by the iterator + CUTLASS_HOST_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) const { + + Array const *src_ptr = + reinterpret_cast *>(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int k = 0; k < Iterations::kM; ++k) { + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < Iterations::kN; ++n) { + *(ref_.data() + ref_.offset({k, n * Policy::WarpShape::kN}) + pointer_offset / Policy::LaneMmaShape::kN) = + src_ptr[n + k * Iterations::kN]; + } + } + } + + /// Stores a fragment to memory at the location pointed to by the iterator + CUTLASS_HOST_DEVICE + void store(Fragment const &frag, Index pointer_offset) const { + store_with_pointer_offset(frag, 0); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. 
+ CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + // no operation here + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Specialization for B operands of column-major layouts +/// +/// Concept: MutableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Data type of A elements + typename Element_, + /// Shape of the warp in units of thread (concept: MmaSimtPolicy) + typename Policy_, + /// Number of partitions along K dimension + int PartitionsK, + /// Group Size along kPartition - used in sliced-K + int PartitionGroupSize +> +class MmaSimtTileIterator { +public: + + /// Shape of tile to load (concept: MatrixShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand::kB; + + /// Element type + using Element = Element_; + + /// Layout of policy + using Layout = layout::ColumnMajor; + + /// Decomposition of elements among threads + using Policy = Policy_; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + // + // Derived quantities + // + + static_assert(!(Shape::kColumn % Policy::WarpShape::kColumn), + "The warp-level GEMM N size must be divisible by the number of threads arranged along the N dimension."); + + static_assert(Shape::kRow > 0, "Shape::kRow must be greater than zero."); + static_assert(Shape::kColumn > 0, "Shape::kColumn must be greater than zero."); + static_assert(Policy::WarpShape::kColumn > 0, "Policy::WarpShape::kColumn must be greater than zero."); + static_assert(Shape::kColumn / Policy::WarpShape::kColumn > 0, "Shape::kColumn / Policy::WarpShape::kColumn must be greater than zero."); + + /// Thread-level shape of a fragment + using ThreadShape = MatrixShape< + Shape::kRow, + Shape::kColumn / Policy::WarpShape::kColumn + >; + + static_assert(!(ThreadShape::kColumn % Policy::LaneMmaShape::kN), + "Thread-level GEMM must be divisible by Policy::LaneMmaShape."); + + /// Number of individual loads + using Iterations = MatrixShape< + ThreadShape::kRow, + ThreadShape::kColumn / Policy::LaneMmaShape::kN + >; + + /// Fragment object holding a thread's part of a tile + using Fragment = Array; + +private: + + /// Internal reference + cutlass::TensorRef ref_; + + /// Extent of tensor + MatrixCoord extent_; + + /// Origin + MatrixCoord origin_; + + /// Used to conditionally enable extents checking + bool divisible_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaSimtTileIterator(): divisible_(true) { } + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaSimtTileIterator( + TensorRef ref, + int lane_id + ): extent_(Shape::kRow, Shape::kColumn), divisible_(true) { + + // compute offset based on thread ID and lane layout + typename Policy::LaneLayout lane_layout = Policy::get_lane_layout(); + + MatrixCoord lane_offset = lane_layout.inverse(lane_id) * + MatrixCoord(0, Policy::LaneMmaShape::kN); + + origin_ = lane_offset; + + ref.add_coord_offset(lane_offset); + + ref_.reset(ref.data(), ref.stride(0)); + } + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaSimtTileIterator( + TensorRef ref, + TensorCoord extent, + int lane_id + ): extent_(extent), divisible_(false) 
{ + + // compute offset based on thread ID and lane layout + typename Policy::LaneLayout lane_layout = Policy::get_lane_layout(); + + MatrixCoord lane_offset = lane_layout.inverse(lane_id) * + MatrixCoord(0, Policy::LaneMmaShape::kN); + + origin_ = lane_offset; + + ref.add_coord_offset(lane_offset); + + ref_.reset(ref.data(), ref.stride(0)); + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_HOST_DEVICE + MmaSimtTileIterator &add_pointer_offset(LongIndex offset) { + ref_.add_pointer_offset(offset); + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaSimtTileIterator &add_tile_offset(TensorCoord const &coord) { + + TensorCoord coord_offset( + coord.row() * Shape::kRow, + coord.column() * Shape::kColumn); + + origin_ += coord_offset; + + ref_.add_coord_offset(coord_offset); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaSimtTileIterator & operator++() { + + ref_.add_coord_offset({Shape::kRow, 0}); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaSimtTileIterator & operator--() { + + ref_.add_coord_offset({-Shape::kRow, 0}); + + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. (scalar loads) + CUTLASS_HOST_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const { + + CUTLASS_PRAGMA_UNROLL + for (int k = 0; k < Iterations::kRow; ++k) { + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < Iterations::kColumn; ++n) { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < Policy::LaneMmaShape::kN; ++i) { + + MatrixCoord offset(k, n * Policy::WarpShape::kColumn * Policy::LaneMmaShape::kN + i); + + MatrixCoord access_coord = origin_ + offset; + + int frag_idx = n * Policy::LaneMmaShape::kN + i + k * Iterations::kColumn; + + if (divisible_ || + (access_coord.row() < extent_.row() && access_coord.column() < extent_.column())) { + + frag[frag_idx] = *(ref_.data() + ref_.offset(offset) + pointer_offset); + } + else { + frag[frag_idx] = Element(); + } + } + } + } + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + load_with_pointer_offset(frag, 0); + } + + /// Stores a fragment to memory at the location pointed to by the iterator + CUTLASS_HOST_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) const { + + Array const *src_ptr = + reinterpret_cast *>(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int k = 0; k < Iterations::kM; ++k) { + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < Iterations::kN; ++n) { + *(ref_.data() + ref_.offset({k, n * Policy::WarpShape::kN}) + pointer_offset / Policy::LaneMmaShape::kN) = + src_ptr[n + k * Iterations::kN]; + } + } + } + + /// Stores a fragment to memory at the location pointed to by the iterator + CUTLASS_HOST_DEVICE + void store(Fragment const &frag, Index pointer_offset) const { + store_with_pointer_offset(frag, 0); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. 
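// Editorial aside (not part of the vendored file): with the A and B specializations above
// in hand, a warp-level mainloop constructs one iterator per operand and feeds their
// fragments to the MmaSimt operator from mma_simt.h earlier in this diff. A sketch under
// assumed types: the 32x64x8 warp tile, fp32 operands, the 4x8 lane policy, and the name
// warp_gemm are illustrative, and the usual CUTLASS shared-memory staging and alignment
// are presumed.
#include "cutlass/gemm/warp/mma_simt.h"
#include "cutlass/gemm/warp/mma_simt_policy.h"
#include "cutlass/layout/matrix.h"

using WarpShape  = cutlass::gemm::GemmShape<32, 64, 8>;   // per-warp (M, N, K) tile
using LanePolicy = cutlass::gemm::warp::MmaSimtPolicy<
    cutlass::MatrixShape<4, 8>, cutlass::layout::RowMajorInterleaved<2>,
    cutlass::gemm::GemmShape<4, 4, 1>>;
using WarpMma    = cutlass::gemm::warp::MmaSimt<
    WarpShape,
    float, cutlass::layout::ColumnMajor,   // A
    float, cutlass::layout::RowMajor,      // B
    float, cutlass::layout::RowMajor,      // C
    LanePolicy>;

// Each lane accumulates a (32 / 4) x (64 / 8) = 8 x 8 block of C.
static_assert(WarpMma::FragmentC::kElements == 64, "per-lane accumulator count");

__device__ void warp_gemm(float *smem_A, int lda,   // column-major A tile in shared memory
                          float *smem_B, int ldb,   // row-major B tile in shared memory
                          WarpMma::FragmentC &accum,
                          int lane_id, int k_iterations) {
  WarpMma::IteratorA iter_A(WarpMma::IteratorA::TensorRef(smem_A, lda), lane_id);
  WarpMma::IteratorB iter_B(WarpMma::IteratorB::TensorRef(smem_B, ldb), lane_id);

  WarpMma::FragmentA frag_A;
  WarpMma::FragmentB frag_B;
  WarpMma mma;

  for (int k = 0; k < k_iterations; ++k) {
    iter_A.load(frag_A);                 // each lane loads its slice of the A tile
    iter_B.load(frag_B);                 // and of the B tile
    mma(accum, frag_A, frag_B, accum);   // accumulate into the per-lane C fragment
    ++iter_A;                            // advance both operands one tile along K
    ++iter_B;
  }
}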
+ CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + // no operation here + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Specialization for C operands of column-major layouts +/// +/// Concept: MutableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Data type of A elements + typename Element_, + /// Shape of the warp in units of thread (concept: MmaSimtPolicy) + typename Policy_ +> +class MmaSimtTileIterator { +public: + + /// Shape of tile to load (concept: MatrixShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand::kC; + + /// Element type + using Element = Element_; + + /// Layout of accumulators in memory + using Layout = layout::ColumnMajor; + + /// Decomposition of elements among threads + using Policy = Policy_; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + // + // Derived quantities + // + + static_assert( + (!(Shape::kRow % Policy::WarpShape::kRow)) && (!(Shape::kColumn % Policy::WarpShape::kColumn)), + "Warp-level GEMM shape must be divisible by the arrangement of threads in the warp."); + + static_assert(Shape::kRow > 0, "Shape::kRow must be greater than zero."); + static_assert(Shape::kColumn > 0, "Shape::kColumn must be greater than zero."); + static_assert(Policy::WarpShape::kRow > 0, "Policy::WarpShape::kRow must be greater than zero."); + static_assert(Policy::WarpShape::kColumn > 0, "Policy::WarpShape::kColumn must be greater than zero."); + static_assert(Shape::kRow / Policy::WarpShape::kRow > 0, "Shape::kRow / Policy::WarpShape::kRow must be greater than zero."); + static_assert(Shape::kColumn / Policy::WarpShape::kColumn > 0, "Shape::kColumn / Policy::WarpShape::kColumn must be greater than zero."); + + /// Thraed-level shape of a fragment + using ThreadShape = MatrixShape< + Shape::kRow / Policy::WarpShape::kRow, + Shape::kColumn / Policy::WarpShape::kColumn + >; + + static_assert( + (!(ThreadShape::kRow % Policy::LaneMmaShape::kM)) && (!(ThreadShape::kColumn % Policy::LaneMmaShape::kN)), + "Warp-level GEMM shape must be divisible by the arrangement of threads in the warp."); + + /// Number of individual loads + using Iterations = MatrixShape< + ThreadShape::kRow / Policy::LaneMmaShape::kM, + ThreadShape::kColumn / Policy::LaneMmaShape::kN + >; + + using Delta = MatrixShape< + Policy::WarpShape::kRow * Policy::LaneMmaShape::kM, + Policy::WarpShape::kColumn * Policy::LaneMmaShape::kN + >; + + /// Fragment object holding a thread's part of a tile + using Fragment = Array; + +private: + + TensorRef ref_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaSimtTileIterator() { } + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaSimtTileIterator( + TensorRef const &ref, + int lane_id + ): + ref_(ref) { + + // compute offset based on thread ID and lane layout + typename Policy::LaneLayout lane_layout = Policy::get_lane_layout(); + + MatrixCoord lane_offset = lane_layout.inverse(lane_id) * + MatrixCoord(Policy::LaneMmaShape::kM, Policy::LaneMmaShape::kN); + + ref_.add_coord_offset(lane_offset); + } + + /// Adds a pointer offset to internal pointer(s) 
to advance through memory + CUTLASS_HOST_DEVICE + MmaSimtTileIterator &add_pointer_offset(LongIndex offset) { + ref_.add_pointer_offset(offset); + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaSimtTileIterator &add_tile_offset(TensorCoord const &coord) { + + ref_.add_coord_offset({ + coord.row() * Shape::kRow, + coord.column() * Shape::kColumn}); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaSimtTileIterator & operator++() { + + ref_.add_coord_offset({Shape::kRow, 0}); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaSimtTileIterator & operator--() { + + ref_.add_coord_offset({-Shape::kRow, 0}); + + return *this; + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_HOST_DEVICE + void load_with_pointer_offset( + Fragment &frag, ///< fragment to be loaded from memory + Index pointer_offset) const { ///< linear offset (in units of Element) when loading + + CUTLASS_PRAGMA_UNROLL + for (int mma_n = 0; mma_n < Iterations::kN; ++mma_n) { + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < Policy::LaneMmaShape::kN; ++n) { + + Array const *src_ptr = + reinterpret_cast const *>( + ref_.data() + pointer_offset + ref_.offset({0, mma_n * Delta::kN + n})); + + CUTLASS_PRAGMA_UNROLL + for (int mma_m = 0; mma_m < Iterations::kM; ++mma_m) { + + Array *dst_ptr = + reinterpret_cast *>(&frag) + + mma_m + Iterations::kM * (n + mma_n * Policy::LaneMmaShape::kN); + + *dst_ptr = src_ptr[mma_m * Policy::WarpShape::kM]; + } + } + } + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + load_with_pointer_offset(frag, 0); + } + + /// Stores a fragment to memory at the location pointed to by the iterator + CUTLASS_HOST_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) const { + + CUTLASS_PRAGMA_UNROLL + for (int mma_n = 0; mma_n < Iterations::kColumn; ++mma_n) { + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < Policy::LaneMmaShape::kN; ++n) { + + Array *dst_ptr= + reinterpret_cast *>( + ref_.data() + pointer_offset + ref_.offset({0, mma_n * Delta::kColumn + n})); + + CUTLASS_PRAGMA_UNROLL + for (int mma_m = 0; mma_m < Iterations::kRow; ++mma_m) { + + Array const *src_ptr = + reinterpret_cast const *>(&frag) + + mma_m + Iterations::kRow * (n + mma_n * Policy::LaneMmaShape::kN); + + dst_ptr[mma_m * Policy::WarpShape::kRow] = *src_ptr; + } + } + } + } + /// Stores a fragment to memory at the location pointed to by the iterator + CUTLASS_HOST_DEVICE + void store(Fragment const &frag) const { + store_with_pointer_offset(frag, 0); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Specialization for C operands of row-major layouts +/// +/// Concept: MutableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Data type of A elements + typename Element_, + /// Shape of the warp in units of thread (concept: MmaSimtPolicy) + typename Policy_ +> +class MmaSimtTileIterator { +public: + + /// Shape of tile to load (concept: MatrixShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand::kC; + + /// Element type + using Element = Element_; + + /// Layout of accumulators in memory + using Layout = 
layout::RowMajor; + + /// Decomposition of elements among threads + using Policy = Policy_; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + // + // Derived quantities + // + + static_assert( + (!(Shape::kRow % Policy::WarpShape::kRow)) && (!(Shape::kColumn % Policy::WarpShape::kColumn)), + "Warp-level GEMM shape must be divisible by the arrangement of threads in the warp."); + + static_assert(Shape::kRow > 0, "Shape::kRow must be greater than zero."); + static_assert(Shape::kColumn > 0, "Shape::kColumn must be greater than zero."); + static_assert(Policy::WarpShape::kRow > 0, "Policy::WarpShape::kRow must be greater than zero."); + static_assert(Policy::WarpShape::kColumn > 0, "Policy::WarpShape::kColumn must be greater than zero."); + static_assert(Shape::kRow / Policy::WarpShape::kRow > 0, "Shape::kRow / Policy::WarpShape::kRow must be greater than zero."); + static_assert(Shape::kColumn / Policy::WarpShape::kColumn > 0, "Shape::kColumn / Policy::WarpShape::kColumn must be greater than zero."); + + /// Thraed-level shape of a fragment + using ThreadShape = MatrixShape< + Shape::kRow / Policy::WarpShape::kRow, + Shape::kColumn / Policy::WarpShape::kColumn + >; + + static_assert( + (!(ThreadShape::kRow % Policy::LaneMmaShape::kM)) && (!(ThreadShape::kColumn % Policy::LaneMmaShape::kN)), + "Warp-level GEMM shape must be divisible by the arrangement of threads in the warp."); + + /// Number of individual loads + using Iterations = MatrixShape< + ThreadShape::kRow / Policy::LaneMmaShape::kM, + ThreadShape::kColumn / Policy::LaneMmaShape::kN + >; + + using Delta = MatrixShape< + Policy::WarpShape::kRow * Policy::LaneMmaShape::kM, + Policy::WarpShape::kColumn * Policy::LaneMmaShape::kN + >; + + /// Fragment object holding a thread's part of a tile + using Fragment = Array; + +private: + + TensorRef ref_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaSimtTileIterator() { } + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaSimtTileIterator( + TensorRef const &ref, + int lane_id + ): + ref_(ref) { + + // compute offset based on thread ID and lane layout + typename Policy::LaneLayout lane_layout = Policy::get_lane_layout(); + + MatrixCoord lane_offset = lane_layout.inverse(lane_id) * + MatrixCoord(Policy::LaneMmaShape::kM, Policy::LaneMmaShape::kN); + + ref_.add_coord_offset(lane_offset); + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_HOST_DEVICE + MmaSimtTileIterator &add_pointer_offset(LongIndex offset) { + ref_.add_pointer_offset(offset); + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaSimtTileIterator &add_tile_offset(TensorCoord const &coord) { + + ref_.add_coord_offset({ + coord.row() * Shape::kRow, + coord.column() * Shape::kColumn}); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaSimtTileIterator & operator++() { + + ref_.add_coord_offset({Shape::kRow, 0}); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaSimtTileIterator & operator--() { + + ref_.add_coord_offset({-Shape::kRow, 0}); + + return *this; + } + + /// 
Loads a fragment from memory with additional logical offset + CUTLASS_HOST_DEVICE + void load_with_pointer_offset( + Fragment &frag, ///< fragment to be loaded from memory + Index pointer_offset) const { ///< linear offset (in units of Element) when loading + + CUTLASS_PRAGMA_UNROLL + for (int mma_m = 0; mma_m < Iterations::kRow; ++mma_m) { + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < Policy::LaneMmaShape::kM; ++m) { + + Array const *src_ptr = + reinterpret_cast const *>( + ref_.data() + pointer_offset + ref_.offset({mma_m * Delta::kRow + m, 0})); + + CUTLASS_PRAGMA_UNROLL + for (int mma_n = 0; mma_n < Iterations::kColumn; ++mma_n) { + + Array *dst_ptr = + reinterpret_cast *>(&frag) + + mma_n + Iterations::kColumn * (m + mma_m * Policy::LaneMmaShape::kM); + + *dst_ptr = src_ptr[mma_n * Policy::WarpShape::kColumn]; + } + } + } + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + load_with_pointer_offset(frag, 0); + } + + /// Stores a fragment to memory at the location pointed to by the iterator + CUTLASS_HOST_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) const { + + CUTLASS_PRAGMA_UNROLL + for (int mma_m = 0; mma_m < Iterations::kRow; ++mma_m) { + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < Policy::LaneMmaShape::kM; ++m) { + + Array *dst_ptr = + reinterpret_cast *>( + ref_.data() + pointer_offset + ref_.offset({mma_m * Delta::kRow + m, 0})); + + CUTLASS_PRAGMA_UNROLL + for (int mma_n = 0; mma_n < Iterations::kColumn; ++mma_n) { + + Array const *src_ptr = + reinterpret_cast const *>(&frag) + + mma_n + Iterations::kColumn * (m + mma_m * Policy::LaneMmaShape::kM); + + dst_ptr[mma_n * Policy::WarpShape::kColumn] = *src_ptr; + } + } + } + } + + /// Stores a fragment to memory at the location pointed to by the iterator + CUTLASS_HOST_DEVICE + void store(Fragment const &frag) const { + store_with_pointer_offset(frag, 0); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Specialization for A operands of column-major-K interleaved layouts +/// +/// Concept: MutableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Data type of A elements + typename Element_, + /// Shape of the warp in units of thread (concept: MmaSimtPolicy) + typename Policy_, + /// Number of partitions along K dimension + int PartitionsK, + /// Number of KGroups per kPartition + int PartitionGroupSize +> +class MmaSimtTileIterator, Policy_, PartitionsK, PartitionGroupSize> { +public: + + /// Shape of tile to load (concept: MatrixShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand::kA; + + /// Element type + using Element = Element_; + + /// Layout of policy + using Layout = layout::ColumnMajorInterleaved<4> ; + + /// Decomposition of elements among threads + using Policy = Policy_; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Iterleave factor + static const int kInterleave = 4; + + /// Number of partitions along K dimension + 
static const int kPartitionsK = PartitionsK; + + /// Number of KGroups per kPartition + static const int kGroupPerTile = PartitionGroupSize / Shape::kColumn; + + // + // Derived quantities + // + + static_assert(!(Shape::kRow % Policy::WarpShape::kRow), + "The warp-level GEMM M size must be divisible by the number of threads arranged along the M dimension."); + + static_assert(Shape::kRow > 0, "Shape::kRow must be greater than zero."); + static_assert(Shape::kColumn > 0, "Shape::kColumn must be greater than zero."); + static_assert(Policy::WarpShape::kRow > 0, "Policy::WarpShape::kRow must be greater than zero."); + static_assert(Shape::kRow / Policy::WarpShape::kRow > 0, "Shape::kRow / Policy::WarpShape::kRow must be greater than zero."); + + /// Thread-level shape of a fragment + using ThreadShape = MatrixShape< + Shape::kRow / Policy::WarpShape::kRow, + Shape::kColumn + >; + + static_assert(!(ThreadShape::kRow % Policy::LaneMmaShape::kM) && !(ThreadShape::kColumn % Policy::LaneMmaShape::kK), + "Thread-level GEMM must be divisible by Policy::LaneMmaShape."); + + /// Number of individual loads + using Iterations = MatrixShape< + ThreadShape::kRow / Policy::LaneMmaShape::kM, + ThreadShape::kColumn / Policy::LaneMmaShape::kK + >; + + /// Fragment object holding a thread's part of a tile + using Fragment = Array; + +private: + + /// Internal reference + cutlass::TensorRef, layout::ColumnMajorInterleaved<4>> ref_; + + /// group index within tile + int k_group_idx_; + +public: + CUTLASS_HOST_DEVICE + MmaSimtTileIterator() { } + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaSimtTileIterator( + TensorRef ref, + int lane_id + ) { + + // compute offset based on thread ID and lane layout + typename Policy::LaneLayout lane_layout = Policy::get_lane_layout(); + + MatrixCoord lane_offset = lane_layout.inverse(lane_id) * + MatrixCoord(Policy::LaneMmaShape::kM, 0); + + ref.add_coord_offset(lane_offset); + + k_group_idx_ = 0; + ref_.reset(reinterpret_cast *>(ref.data()), ref.stride(0)/Policy::LaneMmaShape::kMK); + } + + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_HOST_DEVICE + MmaSimtTileIterator &add_pointer_offset(LongIndex offset) { + ref_.add_pointer_offset(offset); + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaSimtTileIterator &add_tile_offset(TensorCoord const &coord) { + + ref_.add_coord_offset({ + coord.row() * Shape::kRow / Policy::LaneMmaShape::kMK, + coord.column() * Shape::kColumn}); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaSimtTileIterator & operator++() { + + add_tile_offset({0, 1}); + + if (kPartitionsK > 1) { + ++k_group_idx_; + // Jump to next stage + if (k_group_idx_ == kGroupPerTile) { + k_group_idx_ = 0; + add_tile_offset({0, kGroupPerTile * (kPartitionsK-1)}); + } + } + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaSimtTileIterator & operator--() { + + ref_.add_coord_offset({0, -Shape::kColumn}); + + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. 
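// Editorial aside (not part of the vendored file): the interleaved<4> layouts exist to
// feed the int8 path selected by the use_dp4a flag in mma_simt.h above, where four
// consecutive K elements per operand are packed into one 32-bit register and consumed by
// a single dp4a instruction. A plain CUDA background sketch of that arithmetic follows;
// the function name is illustrative and the fallback branch is reference code for
// pre-SM61 targets.
#include <cstdint>

__device__ int32_t dot4_accumulate(int32_t packed_a, int32_t packed_b, int32_t acc) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 610)
  return __dp4a(packed_a, packed_b, acc);          // 4-way int8 dot product + add, one instruction
#else
  int32_t sum = acc;                               // reference arithmetic for older targets
  for (int i = 0; i < 4; ++i) {
    int8_t a = int8_t((packed_a >> (8 * i)) & 0xff);
    int8_t b = int8_t((packed_b >> (8 * i)) & 0xff);
    sum += int32_t(a) * int32_t(b);
  }
  return sum;
#endif
}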
+ CUTLASS_HOST_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const { + + Array *dst_ptr = + reinterpret_cast *>(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int k = 0; k < Iterations::kColumn; ++k) { + + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < Iterations::kRow; ++m) { + + dst_ptr[m + k * Iterations::kRow] = + *((ref_.data() + ref_.offset({m * Policy::WarpShape::kRow / kInterleave, + k*Policy::LaneMmaShape::kK}) + pointer_offset / Policy::LaneMmaShape::kM)); + } + } + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + load_with_pointer_offset(frag, 0); + } + + /// Stores a fragment to memory at the location pointed to by the iterator + CUTLASS_HOST_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) const { + + Array const *src_ptr = + reinterpret_cast *>(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int k = 0; k < Iterations::kN; ++k) { + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < Iterations::kM; ++m) { + *(ref_.data() + ref_.offset(m * Policy::WarpShape::kM, k) + pointer_offset / Policy::LaneMmaShape::kM) = + src_ptr[m + k * Iterations::kM]; + } + } + } + + /// Stores a fragment to memory at the location pointed to by the iterator + CUTLASS_HOST_DEVICE + void store(Fragment const &frag) const { + store_with_pointer_offset(frag, 0); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. + CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + // no operation here + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Specialization for B operands of row-major k-interleaved layouts +/// +/// Concept: MutableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Data type of A elements + typename Element_, + /// Shape of the warp in units of thread (concept: MmaSimtPolicy) + typename Policy_, + /// Number of partitions along K dimension + int PartitionsK, + /// Number of KGroups per kPartition + int PartitionGroupSize +> +class MmaSimtTileIterator, Policy_, PartitionsK, PartitionGroupSize> { +public: + + /// Shape of tile to load (concept: MatrixShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand::kB; + + /// Element type + using Element = Element_; + + /// Layout of policy + using Layout = layout::RowMajorInterleaved<4>; + + /// Decomposition of elements among threads + using Policy = Policy_; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Interleave factor + static const int kInterleave = 4; + + /// Number of partitions along K dimension + static const int kPartitionsK = PartitionsK; + + /// Number of KGroups per kPartition + static const int kGroupPerTile = PartitionGroupSize / Shape::kRow; + + // + // Derived quantities + // + + static_assert(!(Shape::kColumn % 
Policy::WarpShape::kColumn), + "The warp-level GEMM N size must be divisible by the number of threads arranged along the N dimension."); + + static_assert(Shape::kRow > 0, "Shape::kRow must be greater than zero."); + static_assert(Shape::kColumn > 0, "Shape::kColumn must be greater than zero."); + static_assert(Policy::WarpShape::kColumn > 0, "Policy::WarpShape::kColumn must be greater than zero."); + static_assert(Shape::kColumn / Policy::WarpShape::kColumn > 0, "Shape::kColumn / Policy::WarpShape::kColumn must be greater than zero."); + + /// Thread-level shape of a fragment + using ThreadShape = MatrixShape< + Shape::kRow, + Shape::kColumn / Policy::WarpShape::kColumn + >; + + static_assert(!(ThreadShape::kColumn % Policy::LaneMmaShape::kN) && !(ThreadShape::kRow % Policy::LaneMmaShape::kK), + "Thread-level GEMM must be divisible by Policy::LaneMmaShape."); + + /// Number of individual loads + using Iterations = MatrixShape< + ThreadShape::kRow / Policy::LaneMmaShape::kK, + ThreadShape::kColumn / Policy::LaneMmaShape::kN + >; + + /// Fragment object holding a thread's part of a tile + using Fragment = Array; + + +private: + + /// Internal reference + cutlass::TensorRef, layout::RowMajorInterleaved<4>> ref_; + + /// group index within tile + int k_group_idx_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaSimtTileIterator() { } + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaSimtTileIterator( + TensorRef ref, + int lane_id + ) { + + // compute offset based on thread ID and lane layout + typename Policy::LaneLayout lane_layout = Policy::get_lane_layout(); + + MatrixCoord lane_offset = lane_layout.inverse(lane_id) * + MatrixCoord(0, Policy::LaneMmaShape::kN); + + ref.add_coord_offset(lane_offset); + + k_group_idx_ = 0; + + ref_.reset( + reinterpret_cast *>(ref.data()), + ref.stride(0) / Policy::LaneMmaShape::kKN); + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_HOST_DEVICE + MmaSimtTileIterator &add_pointer_offset(LongIndex offset) { + ref_.add_pointer_offset(offset); + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaSimtTileIterator &add_tile_offset(TensorCoord const &coord) { + + ref_.add_coord_offset({ + coord.row() * Shape::kRow, + coord.column() * Shape::kColumn / Policy::LaneMmaShape::kKN}); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaSimtTileIterator & operator++() { + + add_tile_offset({1, 0}); + + if (kPartitionsK > 1) { + ++k_group_idx_; + // Jump to next stage + if (k_group_idx_ == kGroupPerTile) { + k_group_idx_ = 0; + add_tile_offset({kGroupPerTile * (kPartitionsK-1), 0}); + } + } + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaSimtTileIterator & operator--() { + + ref_.add_coord_offset({-Shape::kRow, 0}); + + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. 
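// Editorial aside (not part of the vendored file): to make the derived quantities above
// concrete, here is the per-lane bookkeeping for an assumed interleaved B tile. The
// shapes are illustrative; the divisions simply mirror the ThreadShape and Iterations
// definitions just given.
// Assumed warp-level B tile: Shape = MatrixShape<8, 64> (K x N), int8 elements.
constexpr int kWarpK  = 8,  kWarpN = 64;      // Shape::kRow, Shape::kColumn
constexpr int kLanesN = 8;                    // Policy::WarpShape::kColumn
constexpr int kLaneK  = 4,  kLaneN = 4;       // Policy::LaneMmaShape::kK, ::kN

constexpr int kThreadK = kWarpK;              // ThreadShape::kRow (K is not split across lanes)
constexpr int kThreadN = kWarpN / kLanesN;    // ThreadShape::kColumn = 8
constexpr int kIterK   = kThreadK / kLaneK;   // Iterations::kRow     = 2
constexpr int kIterN   = kThreadN / kLaneN;   // Iterations::kColumn  = 2
static_assert(kIterK == 2 && kIterN == 2,
              "each lane issues a 2 x 2 grid of loads per warp tile");
// Each of those loads pulls a small contiguous group of int8 values (grouped by
// LaneMmaShape), which is exactly what the interleaved layout arranges to be contiguous.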
+ CUTLASS_HOST_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const { + + Array *dst_ptr = + reinterpret_cast *>(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int k = 0; k < Iterations::kRow; ++k) { + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < Iterations::kColumn; ++n) { + dst_ptr[n + k * Iterations::kColumn] = + *(ref_.data() + ref_.offset({k * Policy::LaneMmaShape::kK, + n * Policy::WarpShape::kColumn / kInterleave}) + pointer_offset / Policy::LaneMmaShape::kN); + } + } + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + load_with_pointer_offset(frag, 0); + } + + /// Stores a fragment to memory at the location pointed to by the iterator + CUTLASS_HOST_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) const { + + Array const *src_ptr = + reinterpret_cast *>(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int k = 0; k < Iterations::kM; ++k) { + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < Iterations::kN; ++n) { + *(ref_.data() + ref_.offset({k, n * Policy::WarpShape::kN}) + pointer_offset / Policy::LaneMmaShape::kN) = + src_ptr[n + k * Iterations::kN]; + } + } + } + + /// Stores a fragment to memory at the location pointed to by the iterator + CUTLASS_HOST_DEVICE + void store(Fragment const &frag, Index pointer_offset) const { + store_with_pointer_offset(frag, 0); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. + CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + // no operation here + } +}; + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace warp +} // namespace gemm +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_sparse_tensor_op.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_sparse_tensor_op.h new file mode 100644 index 0000000000000000000000000000000000000000..e049f4f0fa6ce2cef21832b29b7bcdd37cdb734a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_sparse_tensor_op.h @@ -0,0 +1,339 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Templates implementing warp-level matrix multiply-accumulate + operations targeting sparse Tensor Cores. +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/array.h" +#include "cutlass/platform/platform.h" + +#include "cutlass/numeric_conversion.h" +#include "cutlass/numeric_types.h" +#include "cutlass/matrix_shape.h" + +#include "cutlass/arch/memory_sm75.h" +#include "cutlass/arch/mma_sm75.h" +#include "cutlass/arch/mma_sm80.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/warp/mma.h" + +#include "cutlass/gemm/warp/mma_tensor_op_policy.h" +#include "cutlass/gemm/warp/mma_tensor_op.h" + +#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h" +#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h" +#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sparse.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace warp { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions. +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + /// Data type of A elements + typename ElementA_, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA_, + /// Data type of B elements + typename ElementB_, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB_, + /// Element type of C matrix + typename ElementC_, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC_, + /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) + typename Policy_, + /// Number of partitions along K dimension + int PartitionsK_ = 1, + /// Store the accumulators in row major or column major. Row major is used + /// when output layout is interleaved. 
+ bool AccumulatorsInRowMajor = false, + /// Used for partial specialization + typename Enable = bool +> +class SparseMmaTensorOp { +public: + /// Shape of warp-level matrix operation (concept: GemmShape) + using Shape = Shape_; + + /// Data type of multiplicand A + using ElementA = ElementA_; + + /// Layout of multiplicand A + using LayoutA = LayoutA_; + + /// Data type of multiplicand B + using ElementB = ElementB_; + + /// Layout of multiplicand B + using LayoutB = LayoutB_; + + /// Data type of accumulator matrix C + using ElementC = ElementC_; + + /// Layout of accumulator matrix C + using LayoutC = LayoutC_; + + /// Shape of the warp in units of thread (concept: MmaLanePolicySimt) + using Policy = Policy_; + + /// Equivalant base dense mma + using Base = MmaTensorOp; + + /// Underlying matrix multiply operator (concept: arch::Mma) + using ArchMmaOperator = typename Base::ArchMmaOperator; + + /// Indicates math operator + using MathOperator = typename ArchMmaOperator::Operator; + + /// Architecture tag from underlying instruction + using ArchTag = typename Base::ArchTag; + + /// Indicates class of matrix operator + using OperatorClass = typename Base::OperatorClass; + + /// Shape of underlying instruction + using InstructionShape = typename Base::InstructionShape; + + /// Complex transform on A operand + static ComplexTransform const kTransformA = Base::kTransformA; + + /// Complex transform on B operand + static ComplexTransform const kTransformB = Base::kTransformB; + + /// Number of threads participating in warp-level matrix product + static int const kThreadCount = 32; + + /// Number of partitions along K dimension + static int const kPartitionsK = PartitionsK_; + + /// Sparsity in Operand A + static int const kSparse = Policy::Operator::kSparse; + + /// Meta data size in bits + static int const kMetaSizeInBits = Policy::Operator::kMetaSizeInBits; + + /// Max ID2 + static int const kMaxID2 = Policy::Operator::kMaxID2; + + /// Data type of meta E that is moved at the same time + using ElementE = + typename cutlass::platform::conditional::type; + + /// Number of ElementA that is associated with one ElementE + static int const kElementsPerElementE = + 128 / cutlass::sizeof_bits::value; + + /// Meta data is essentially interleaved but mapped to ColumnMajor internally + static int const kInterleaved = 2; + + /// Layout of meta E + using LayoutE = cutlass::layout::ColumnMajor; + + public: + + /// Iterates over the A operand in memory + using IteratorA = MmaTensorOpMultiplicandTileIterator< + MatrixShape, Operand::kA, ElementA, + LayoutA, + MatrixShape, + Policy::OpDelta::kRow, kThreadCount, kPartitionsK>; + + /// Storage for A tile + using FragmentA = typename IteratorA::Fragment; + + /// Storage for transformed A tile + using TransformedFragmentA = + Array; + + /// Iterates over the B operand in memory + using IteratorB = typename Base::IteratorB; + + /// Storage for B tile + using FragmentB = typename Base::FragmentB; + + /// Storage for transformed B tile + using TransformedFragmentB = typename Base::TransformedFragmentB; + + /// Iterates over the C operand in memory + using IteratorC = typename Base::IteratorC; + + /// Storage for C tile + using FragmentC = typename Base::FragmentC; + + /// Iterates over the E operand in memory + using IteratorE = SparseMmaTensorOpMetaTileIterator< + MatrixShape, + ElementE, LayoutE, + MatrixShape, + Policy::OpDelta::kRow, kThreadCount, kPartitionsK>; + + /// Storage for E tile + using FragmentE = typename IteratorE::Fragment; + + /// Number of mma 
operations performed + using MmaIterations = typename Base::MmaIterations; + +public: + + /// Underlying matrix multiply operator (concept: arch::Mma) + ArchMmaOperator mma; + +public: + + // + // Methods + // + + /// Ctor + CUTLASS_DEVICE + SparseMmaTensorOp() {} + + /// Performs a warp-level matrix multiply-accumulate operation + CUTLASS_DEVICE + void operator()( + FragmentC &D, + TransformedFragmentA const &A, + TransformedFragmentB const &B, + FragmentC const &C, + FragmentE const &E + ) const { + + using MmaOperandA = typename Policy::Operator::FragmentA; + using MmaOperandB = typename Policy::Operator::FragmentB; + using MmaOperandC = typename Policy::Operator::FragmentC; + using MmaOperandE = typename Policy::Operator::FragmentE; + + #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800) + + D = C; + + MmaOperandA const *ptr_A = reinterpret_cast(&A); + MmaOperandB const *ptr_B = reinterpret_cast(&B); + MmaOperandC *ptr_D = reinterpret_cast(&D); + MmaOperandE const *ptr_E = reinterpret_cast(&E); + + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < MmaIterations::kRow; ++m) { + + int id2 = m % kMaxID2; + + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < MmaIterations::kColumn; ++n) { + + int n_serpentine = ((m % 2) ? (MmaIterations::kColumn - 1 - n) : n); + + if (AccumulatorsInRowMajor) { // matrix B is reordered + mma( + ptr_D[n_serpentine + m * MmaIterations::kColumn], + ptr_A[m], + ptr_B[n_serpentine], + ptr_D[n_serpentine + m * MmaIterations::kColumn], + ptr_E[(m / kMaxID2)], + id2); + } else { + mma(ptr_D[m + n_serpentine * MmaIterations::kRow], + ptr_A[m], + ptr_B[n_serpentine], + ptr_D[m + n_serpentine * MmaIterations::kRow], + ptr_E[(m / kMaxID2)], + id2); + } + } + } + #else + assert(0); + #endif + } + + /// Transform the mma operands to the required types + CUTLASS_DEVICE + void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B, + FragmentA const &A, FragmentB const &B) const { + + #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800) + // + // Define conversions from source type to instruction type + // + FloatRoundStyle const kRoundA = + PreferredRoundingMode::kRound; + FloatRoundStyle const kRoundB = + PreferredRoundingMode::kRound; + detail::ConvertAndPack + convert_A; + NumericArrayConverter + convert_B; + Array const *ptr_A = + reinterpret_cast const *>(&A); + Array * + ptr_dst_A = reinterpret_cast *>(&dst_A); + + dst_B = convert_B(B); + + ptr_dst_A[0] = convert_A(ptr_A[0]); + ptr_dst_A[1] = convert_A(ptr_A[1]); + #else + assert(0); + #endif + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace warp +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op.h new file mode 100644 index 0000000000000000000000000000000000000000..3124618c2807727429d12813ee5f96a8a38408f1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op.h @@ -0,0 +1,431 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Templates implementing warp-level matrix multiply-accumulate operations targeting + Tensor Cores. 
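+
+    This header defines the warp-level MmaTensorOp operator together with the
+    detail::ConvertAndPack helpers used by MmaTensorOp::transform() to convert
+    loaded fragments into the operand types expected by the underlying arch::Mma.
+
+    Illustrative call sequence (sketch only; WarpMma stands for a concrete
+    MmaTensorOp instantiation, and loaded_A / loaded_B are placeholder fragments
+    produced by its operand tile iterators):
+
+      WarpMma warp_mma;
+      WarpMma::FragmentC accum;
+      accum.clear();
+
+      WarpMma::TransformedFragmentA frag_A;
+      WarpMma::TransformedFragmentB frag_B;
+      warp_mma.transform(frag_A, frag_B, loaded_A, loaded_B);
+
+      warp_mma(accum, frag_A, frag_B, accum);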
+*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/array.h" +#include "cutlass/platform/platform.h" + +#include "cutlass/numeric_conversion.h" +#include "cutlass/numeric_types.h" +#include "cutlass/matrix_shape.h" + +#include "cutlass/arch/memory_sm75.h" +#include "cutlass/arch/mma_sm75.h" +#include "cutlass/arch/mma_sm80.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/warp/mma.h" + +#include "cutlass/gemm/warp/mma_tensor_op_policy.h" + +#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h" +#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace warp { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace detail { + +template +struct ConvertAndPack { + + using Converter = NumericArrayConverter; + + CUTLASS_HOST_DEVICE + Array operator()(Array const &source) { + Converter converter; + + return converter(source); + } +}; + +template +struct ConvertAndPack { + + CUTLASS_HOST_DEVICE + Array operator()(Array const &source) { + return source; + } +}; + +template +struct ConvertAndPack { + + using Converter = NumericArrayConverter; + + CUTLASS_HOST_DEVICE + Array operator()(Array const &source) { + Converter converter; + + Array tmp; + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < N; ++i) { + int idx = (((i << 1) & 2) | ((i >> 1) & 1) | (i & 0xfffffffc)); + tmp[i] = source[idx]; + } + + return converter(tmp); + } +}; + +template +struct ConvertAndPack { + + using Converter = NumericArrayConverter; + + CUTLASS_HOST_DEVICE + Array operator()(Array const &source) { + Converter converter; + + Array tmp; + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < N; ++i) { + int idx = (((i << 1) & 2) | ((i >> 1) & 1) | (i & 0xfffffffc)); + tmp[i] = source[idx]; + } + + return converter(tmp); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace detail + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions. +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + /// Data type of A elements + typename ElementA_, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA_, + /// Data type of B elements + typename ElementB_, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB_, + /// Element type of C matrix + typename ElementC_, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC_, + /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) + typename Policy_, + /// Number of partitions along K dimension + int PartitionsK_ = 1, + /// Store the accumulators in row major or column major. Row major is used + /// when output layout is interleaved. 
+ bool AccumulatorsInRowMajor = false, + /// Used for partial specialization + typename Enable = bool +> +class MmaTensorOp { +public: + /// Shape of warp-level matrix operation (concept: GemmShape) + using Shape = Shape_; + + /// Data type of multiplicand A + using ElementA = ElementA_; + + /// Layout of multiplicand A + using LayoutA = LayoutA_; + + /// Data type of multiplicand B + using ElementB = ElementB_; + + /// Layout of multiplicand B + using LayoutB = LayoutB_; + + /// Data type of accumulator matrix C + using ElementC = ElementC_; + + /// Layout of accumulator matrix C + using LayoutC = LayoutC_; + + /// Shape of the warp in units of thread (concept: MmaLanePolicySimt) + using Policy = Policy_; + + /// Underlying matrix multiply operator (concept: arch::Mma) + using ArchMmaOperator = typename Policy::Operator; + + /// Indicates math operator + using MathOperator = typename ArchMmaOperator::Operator; + + /// Architecture tag from underlying instruction + using ArchTag = typename ArchMmaOperator::ArchTag; + + /// Indicates class of matrix operator + using OperatorClass = arch::OpClassTensorOp; + + /// Shape of underlying instruction + using InstructionShape = typename ArchMmaOperator::Shape; + + /// Complex transform on A operand + static ComplexTransform const kTransformA = ComplexTransform::kNone; + + /// Complex transform on B operand + static ComplexTransform const kTransformB = ComplexTransform::kNone; + + /// Number of threads participating in warp-level matrix product + static int const kThreadCount = 32; + + /// Number of partitions along K dimension + static int const kPartitionsK = PartitionsK_; + +public: + + /// Iterates over the A operand in memory + using IteratorA = MmaTensorOpMultiplicandTileIterator< + MatrixShape, Operand::kA, ElementA, LayoutA, + MatrixShape, + Policy::OpDelta::kRow, kThreadCount, kPartitionsK>; + + /// Storage for A tile + using FragmentA = typename IteratorA::Fragment; + + /// Storage for transformed A tile + using TransformedFragmentA = + Array; + + /// Iterates over the B operand in memory + using IteratorB = MmaTensorOpMultiplicandTileIterator< + MatrixShape, Operand::kB, ElementB, LayoutB, + MatrixShape, + Policy::OpDelta::kRow, kThreadCount, kPartitionsK>; + + /// Storage for B tile + using FragmentB = typename IteratorB::Fragment; + + /// Storage for transformed B tile + using TransformedFragmentB = + Array; + + /// Iterates over the C operand in memory + using IteratorC = MmaTensorOpAccumulatorTileIterator< + MatrixShape, ElementC, LayoutC, + typename ArchMmaOperator::Shape, typename Policy::OpDelta>; + + /// Storage for C tile + using FragmentC = typename IteratorC::Fragment; + + /// Number of mma operations performed + using MmaIterations = MatrixShape< + (Shape::kM + ArchMmaOperator::Shape::kM - 1) / ArchMmaOperator::Shape::kM, + (Shape::kN + ArchMmaOperator::Shape::kN - 1) / ArchMmaOperator::Shape::kN + >; + +public: + + /// Underlying matrix multiply operator (concept: arch::Mma) + ArchMmaOperator mma; + +public: + + // + // Methods + // + + /// Ctor + CUTLASS_DEVICE + MmaTensorOp() {} + + /// Performs a warp-level matrix multiply-accumulate operation + CUTLASS_DEVICE + void operator()( + FragmentC &D, + TransformedFragmentA const &A, + TransformedFragmentB const &B, + FragmentC const &C + ) const { + + using MmaOperandA = typename ArchMmaOperator::FragmentA; + using MmaOperandB = typename ArchMmaOperator::FragmentB; + using MmaOperandC = typename ArchMmaOperator::FragmentC; + + D = C; + + MmaOperandA const *ptr_A = 
reinterpret_cast(&A); + MmaOperandB const *ptr_B = reinterpret_cast(&B); + MmaOperandC *ptr_D = reinterpret_cast(&D); + + #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 800) + // Serpentine visitation order maximizing reuse of Rb + // The visitation order is like + // _ + // | | | | + // | | | | + // |_| |_| + // + // Down Up Down Up + + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < MmaIterations::kColumn; ++n) { + + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < MmaIterations::kRow; ++m) { + + int m_serpentine = ((n % 2) ? (MmaIterations::kRow - 1 - m) : m); + + if (AccumulatorsInRowMajor) { // matrix B is reordered + mma( + ptr_D[n + m_serpentine * MmaIterations::kColumn], + ptr_A[m_serpentine], + ptr_B[n], + ptr_D[n + m_serpentine * MmaIterations::kColumn]); + } else { + mma( + ptr_D[m_serpentine + n * MmaIterations::kRow], + ptr_A[m_serpentine], + ptr_B[n], + ptr_D[m_serpentine + n * MmaIterations::kRow]); + } + } + } + #elif defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800) + // Serpentine visitation order maximizing reuse of Ra + // The visitation order is like + // _________ + // _________| + // |_________ + // __________| + // + // Right Left Right Left + + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < MmaIterations::kRow; ++m) { + + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < MmaIterations::kColumn; ++n) { + + int n_serpentine = ((m % 2) ? (MmaIterations::kColumn - 1 - n) : n); + + if (AccumulatorsInRowMajor) { // matrix B is reordered + mma( + ptr_D[n_serpentine + m * MmaIterations::kColumn], + ptr_A[m], + ptr_B[n_serpentine], + ptr_D[n_serpentine + m * MmaIterations::kColumn]); + } else { + mma(ptr_D[m + n_serpentine * MmaIterations::kRow], + ptr_A[m], + ptr_B[n_serpentine], + ptr_D[m + n_serpentine * MmaIterations::kRow]); + } + } + } + #else + assert(0); + #endif + } + + /// Transform the mma operands to the required types + CUTLASS_DEVICE + void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B, + FragmentA const &A, FragmentB const &B) const { + + // + // Define conversions from source type to instruction type + // + FloatRoundStyle const kRoundA = + PreferredRoundingMode::kRound; + FloatRoundStyle const kRoundB = + PreferredRoundingMode::kRound; + #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 800) + detail::ConvertAndPack + convert_A; + NumericArrayConverter + convert_B; + Array const *ptr_B = + reinterpret_cast const *>(&B); + Array * + ptr_dst_B = reinterpret_cast *>(&dst_B); + + dst_A = convert_A(A); + + ptr_dst_B[0] = convert_B(ptr_B[0]); + ptr_dst_B[1] = convert_B(ptr_B[1]); + + #elif defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800) + detail::ConvertAndPack + convert_A; + NumericArrayConverter + convert_B; + Array const *ptr_A = + reinterpret_cast const *>(&A); + Array * + ptr_dst_A = reinterpret_cast *>(&dst_A); + + dst_B = convert_B(B); + + ptr_dst_A[0] = convert_A(ptr_A[0]); + ptr_dst_A[1] = convert_A(ptr_A[1]); + #else + assert(0); + #endif + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace warp +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#include "cutlass/gemm/warp/mma_tensor_op_fast_f32.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_fast_f32.h 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_fast_f32.h new file mode 100644 index 0000000000000000000000000000000000000000..d17edc12360c9d24cb05ff9475013f145e9de381 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_fast_f32.h @@ -0,0 +1,471 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief Templates implementing warp-level matrix multiply-accumulate operations targeting + Tensor Cores. 
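+
+    The "fast F32" (emulated FP32) path decomposes each float operand into a
+    "big" tfloat32_t term and a "small" tfloat32_t residual, approximately
+
+      a_big   = tf32(a),    a_small = tf32(a - float(a_big)),
+
+    so that a is close to a_big + a_small. Products are then accumulated as
+
+      a * b  ~=  a_big*b_big + a_big*b_small + a_small*b_big        (k3xTF32)
+
+    with the a_small*b_small term issued as a fourth Tensor Core MMA when
+    TensorFloat32Op::k4xTF32 is selected (see MmaTensorOpFastF32::operator()).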
+*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/array.h" +#include "cutlass/platform/platform.h" + +#include "cutlass/numeric_conversion.h" +#include "cutlass/numeric_types.h" +#include "cutlass/matrix_shape.h" + +#include "cutlass/arch/mma_sm80.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/warp/mma.h" + +#include "cutlass/gemm/warp/mma_tensor_op_policy.h" +#include "cutlass/gemm/warp/mma_tensor_op.h" + +#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h" +#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace warp { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +enum class TensorFloat32Op { + k3xTF32, + k4xTF32 +}; + +template < + /// Floating-point rounding style + FloatRoundStyle RoundBigA_, + /// Floating-point rounding style + FloatRoundStyle RoundSmallA_, + /// Floating-point rounding style + FloatRoundStyle RoundBigB_ = RoundBigA_, + /// Floating-point rounding style + FloatRoundStyle RoundSmallB_ = RoundSmallA_, + /// Precision for TensorFloat32Op + // (k3xTF32: BigxBig, BigxSmall, SmallxBig) + // (k4xTF32: BigxBig, BigxSmall, SmallxBig, SmallxSmall) + TensorFloat32Op Precision_ = TensorFloat32Op::k3xTF32 + > +struct FastF32 { + + static FloatRoundStyle const kRoundBigA = RoundBigA_; + static FloatRoundStyle const kRoundSmallA = RoundSmallA_; + static FloatRoundStyle const kRoundBigB = RoundBigB_; + static FloatRoundStyle const kRoundSmallB = RoundSmallB_; + static TensorFloat32Op const kPrecision = Precision_; +}; + + +namespace detail { + + template< + int N, + FloatRoundStyle RoundBig = FloatRoundStyle::round_toward_zero, + FloatRoundStyle RoundSmall = FloatRoundStyle::round_half_ulp_truncate + > + struct ConvertAndPackAccurateF32 { + + /// Rounding styles for big and small part + static FloatRoundStyle const kRoundBig = RoundBig; + static FloatRoundStyle const kRoundSmall = RoundSmall; + + /// Converter type + using Converter = NumericConverterFastF32; + + /// Source fragement + using SourceFragment = Array; + + /// Destination fragment + using DestinationFragment = Array; + + /// Converter Fragment holding two tfloat32_t elements for every float + using ConverterFragment = Array; + + /// Index in fargments for the big and small part + static int const kBigIndex = 0; + static int const kSmallIndex = 1; + + CUTLASS_HOST_DEVICE + void operator()(SourceFragment const &source, + DestinationFragment &dst_big, + DestinationFragment &dst_small) { + + Converter convert_; + ConverterFragment result_; + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < N; ++i) { + // convert source to result fragment + result_ = convert_(source[i]); + + // store converted result fragments to destination fragment + dst_big[i] = result_[kBigIndex]; + dst_small[i] = result_[kSmallIndex]; + } + } + }; +} // namespace detail + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions. 
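+/// The primary template below is only declared; the partial specialization that
+/// follows implements float = float * float + float on TF32 Tensor Cores using
+/// the big/small decomposition described in the file comment above.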
+template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + /// Data type of A elements + typename ElementA_, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA_, + /// Data type of B elements + typename ElementB_, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB_, + /// Element type of C matrix + typename ElementC_, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC_, + /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) + typename Policy_, + /// Number of partitions along K dimension + int PartitionsK_ = 1, + /// Store the accumulators in row major or column major. Row major is used + /// when output layout is interleaved. + bool AccumulatorsInRowMajor = false, + /// Used for partial specialization + typename Enable = bool +> +class MmaTensorOpFastF32; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for float*float+float => float using TF32 TensorOps +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA_, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB_, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC_, + /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) + typename Policy_, + /// Number of partitions along K dimension + int PartitionsK_, + /// Store the accumulators in row major or column major. Row major is used + /// when output layout is interleaved. + bool AccumulatorsInRowMajor, + /// Used for partial specialization + typename Enable +> +class MmaTensorOpFastF32< + Shape_, + float, LayoutA_, + float, LayoutB_, + float, LayoutC_, + Policy_, PartitionsK_, + AccumulatorsInRowMajor, Enable> { +public: + /// Shape of warp-level matrix operation (concept: GemmShape) + using Shape = Shape_; + + /// Data type of multiplicand A + using ElementA = float; + + /// Layout of multiplicand A + using LayoutA = LayoutA_; + + /// Data type of multiplicand B + using ElementB = float; + + /// Layout of multiplicand B + using LayoutB = LayoutB_; + + /// Data type of accumulator matrix C + using ElementC = float; + + /// Layout of accumulator matrix C + using LayoutC = LayoutC_; + + /// Shape of the warp in units of thread (concept: MmaLanePolicySimt) + using Policy = Policy_; + + /// Underlying matrix multiply operator (concept: arch::Mma) + using ArchMmaOperator = typename Policy::Operator; + + /// Indicates math operator + using MathOperator = arch::OpMultiplyAddFastF32; + + /// Architecture tag from underlying instruction + using ArchTag = typename ArchMmaOperator::ArchTag; + + /// Indicates class of matrix operator + using OperatorClass = arch::OpClassTensorOp; + + /// Shape of underlying instruction + using InstructionShape = typename ArchMmaOperator::Shape; + + /// Complex transform on A operand + static ComplexTransform const kTransformA = ComplexTransform::kNone; + + /// Complex transform on B operand + static ComplexTransform const kTransformB = ComplexTransform::kNone; + + /// Number of threads participating in warp-level matrix product + static int const kThreadCount = 32; + + /// Number of partitions along K dimension + static int const kPartitionsK = PartitionsK_; + + /// Tune F32 to TF32 big small conversion for float operation + /// Different combination of big small conversin can cause different tradeoff + /// between speed and 
accuracy. Generally, use round_half_ulp_truncate can + /// improve the performance but hur the accuracy. + using MmaFastF32 = FastF32 < + FloatRoundStyle::round_toward_zero, // kRoundBigA + FloatRoundStyle::round_half_ulp_truncate, // kRoundSmallA + FloatRoundStyle::round_toward_zero, // kRoundBigB + FloatRoundStyle::round_half_ulp_truncate, // kRoundSmallB + TensorFloat32Op::k3xTF32 // Number of TF32 operations + >; + +public: + + /// Iterates over the A operand in memory + using IteratorA = MmaTensorOpMultiplicandTileIterator< + MatrixShape, + Operand::kA, + ElementA, + LayoutA, + MatrixShape, + Policy::OpDelta::kRow, + kThreadCount, + kPartitionsK + >; + + /// Storage for A tile + using FragmentA = typename IteratorA::Fragment; + + /// Storage for transformed A tile + using TransformedFragmentA = + Array; + + /// Fragment bisecting big and small sections + using AccessTypeFragmentA = + Array; + + /// Iterates over the B operand in memory + using IteratorB = MmaTensorOpMultiplicandTileIterator< + MatrixShape, + Operand::kB, + ElementB, + LayoutB, + MatrixShape, + Policy::OpDelta::kRow, + kThreadCount, + kPartitionsK + >; + + /// Storage for B tile + using FragmentB = typename IteratorB::Fragment; + + /// Storage for transformed B tile + using TransformedFragmentB = + Array; + + /// Fragment bisecting big and small sections + using AccessTypeFragmentB = + Array; + + /// Index in fargments for the big and small part + static int const kBigIndex = 0; + static int const kSmallIndex = 1; + + /// Iterates over the C operand in memory + using IteratorC = MmaTensorOpAccumulatorTileIterator< + MatrixShape, ElementC, LayoutC, + typename ArchMmaOperator::Shape, typename Policy::OpDelta>; + + /// Storage for C tile + using FragmentC = typename IteratorC::Fragment; + + /// Number of mma operations performed + using MmaIterations = MatrixShape< + (Shape::kM + ArchMmaOperator::Shape::kM - 1) / ArchMmaOperator::Shape::kM, + (Shape::kN + ArchMmaOperator::Shape::kN - 1) / ArchMmaOperator::Shape::kN + >; + +public: + + /// Underlying matrix multiply operator (concept: arch::Mma) + ArchMmaOperator mma; + +public: + + // + // Methods + // + + /// Ctor + CUTLASS_DEVICE + MmaTensorOpFastF32() {} + + /// Performs a warp-level matrix multiply-accumulate operation + CUTLASS_DEVICE + void operator()( + FragmentC &D, + TransformedFragmentA const &A, + TransformedFragmentB const &B, + FragmentC const &C + ) const { + + AccessTypeFragmentA const *ptr_A = reinterpret_cast(&A); + AccessTypeFragmentB const *ptr_B = reinterpret_cast(&B); + + // + // Accumulate in place + // + D = C; + + mma_operator(D, ptr_A[kSmallIndex], ptr_B[kBigIndex], D); + + mma_operator(D, ptr_A[kBigIndex], ptr_B[kSmallIndex], D); + + mma_operator(D, ptr_A[kBigIndex], ptr_B[kBigIndex], D); + + if (MmaFastF32::kPrecision == TensorFloat32Op::k4xTF32) + mma_operator(D, ptr_A[kSmallIndex], ptr_B[kSmallIndex], D); + } + + /// Performs a warp-level matrix multiply-accumulate operation + CUTLASS_DEVICE + void mma_operator( + FragmentC &D, + AccessTypeFragmentA const &A, + AccessTypeFragmentB const &B, + FragmentC const &C + ) const { + + #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800) + + using MmaOperandA = typename ArchMmaOperator::FragmentA; + using MmaOperandB = typename ArchMmaOperator::FragmentB; + using MmaOperandC = typename ArchMmaOperator::FragmentC; + + MmaOperandA const *ptr_A = reinterpret_cast(&A); + MmaOperandB const *ptr_B = reinterpret_cast(&B); + MmaOperandC *ptr_D = reinterpret_cast(&D); + + // Serpentine visitation order 
maximizing reuse of Ra + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < MmaIterations::kRow; ++m) { + + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < MmaIterations::kColumn; ++n) { + + // This allows to reuse of Rb when at serpentine turns + int n_serpentine = ((m % 2) ? (MmaIterations::kColumn - 1 - n) : n); + + if (AccumulatorsInRowMajor) { // matrix B is reordered + mma( + ptr_D[n_serpentine + m * MmaIterations::kColumn], + ptr_A[m], + ptr_B[n_serpentine], + ptr_D[n_serpentine + m * MmaIterations::kColumn]); + } else { + mma( + ptr_D[m + n_serpentine * MmaIterations::kRow], + ptr_A[m], + ptr_B[n_serpentine], + ptr_D[m + n_serpentine * MmaIterations::kRow]); + } + } // end n loop + } // end m loop + #else + assert(0); + #endif + } + + /// Transform the mma operands to the required types + CUTLASS_DEVICE + void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B, + FragmentA const &A, FragmentB const &B) const { + + // + // Define conversions from source type to instruction type + // + #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800) + + detail::ConvertAndPackAccurateF32< + FragmentA::kElements / 2, + MmaFastF32::kRoundBigA, + MmaFastF32::kRoundSmallA> convert_A; + + detail::ConvertAndPackAccurateF32< + FragmentB::kElements, + MmaFastF32::kRoundBigB, + MmaFastF32::kRoundSmallB> convert_B; + + Array *ptr_dst_B = + reinterpret_cast *>(&dst_B); + + convert_B(B, ptr_dst_B[0], ptr_dst_B[1]); + + Array *ptr_dst_A = + reinterpret_cast *>(&dst_A); + + Array const *ptr_A = + reinterpret_cast const *>(&A); + + convert_A(ptr_A[0], ptr_dst_A[0], ptr_dst_A[2]); + + convert_A(ptr_A[1], ptr_dst_A[1], ptr_dst_A[3]); + #else + assert(0); + #endif + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace warp +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_fragment_iterator.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_fragment_iterator.h new file mode 100644 index 0000000000000000000000000000000000000000..aa2806db118284590ad2e23caca48d1a9ba628be --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_fragment_iterator.h @@ -0,0 +1,528 @@ +/*! \file + \brief This defines a "fragment" iterator for visiting the fragments of a warp tile + that participate in one warp-level mma operation. + + Typically, this is used to access the accumulator tile/fragement of a warp-level mma operation. + The accumulator tile is then partitioned into smaller tiles/fragments that can be fed into + next warp-level mma operation. + + This iterator is necessary to accomplish warp-level mma fusion where the accumulator tile is + reused as multiplicand tile for the next mma. 
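+
+    The iterator steps through AccumulatorShape::kCount / Shape::kCount positions
+    (Policy::kIterations below). For example (illustrative numbers only), a 64x64
+    accumulator tile visited with a 64x16 warp tile yields (64*64) / (64*16) = 4
+    iterator positions.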
+ +*/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/array.h" +#include "cutlass/matrix_shape.h" +#include "cutlass/layout/matrix.h" +#include "cutlass/layout/tensor.h" +#include "cutlass/numeric_conversion.h" + +namespace cutlass { +namespace gemm { +namespace warp { + + +//////////////////////////////////////////////////////////////////////////////// + +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Size of the accumulation tile shape (concept: MatrixShape) + typename AccumulatorShape_, + /// KBlocks columns to compute residual + int KBlocksColumn_, + /// Accumulator Element type + typename ElementAccumulator_, + /// Element type + typename Element_, + /// Layout of operand in memory + typename Layout_, + /// Shape of one matrix product operation (concept: MatrixShape) + typename InstructionShape_, + /// Output operation on the fragment + typename OutputOp_> +class MmaTensorOpFragmentIterator; + + +// Partial specialization for col-major accumulator tile + +template < + /// Shape of warp tile to load (concept: MatrixShape) + typename Shape_, + /// Shape of the warp accumulation tile (concept: MatrixShape) + typename AccumulatorShape_, + /// KBlocks columns to compute residual + int KBlocksColumn_, + /// Accumulator Element type + typename ElementAccumulator_, + /// Element type + typename Element_, + /// Shape of one matrix product operation (concept: MatrixShape) + typename InstructionShape_, + /// Output operation on fragment + typename OutputOp_> +class MmaTensorOpFragmentIterator { + public: + + /// Shape of warp tile to load (concept: MatrixShape) + using Shape = Shape_; + + /// Shape of the warp accumulation tile (concept: MatrixShape) + using AccumulatorShape = AccumulatorShape_; + + /// KBlocks columns to compute residual + static int const kKBlockColumn = KBlocksColumn_; + + /// Accumulator Element type + using ElementAccumulator = ElementAccumulator_; + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = cutlass::layout::ColumnMajor; + + /// Shape of one matrix product operation (concept: MatrixShape) + using InstructionShape = InstructionShape_; + + /// Output operation on fragment + using OutputOp = OutputOp_; + + /// Number of participating threads + static int const kThreads = 32; + + /// Internal structure of iterator - made public to enable introspection + struct Policy { + static_assert( + !(Shape::kRow % InstructionShape::kM) && + !(Shape::kColumn % InstructionShape::kN), + "Shape of warp-level Mma must be divisible by operator shape."); + static_assert( + AccumulatorShape::kRow == Shape::kRow, + "Rows of Warp Accumulator must be the same as rows of warp"); + static_assert( + !(AccumulatorShape::kColumn % Shape::kColumn), + "Shape of Warp Accumulator must be divisible by warp shape."); + static_assert( + !(kKBlockColumn % Shape::kColumn), + "KBlock size must be divisible by warp shape."); + + /// Number of times this iterator can be incremented + static int const kIterations = AccumulatorShape::kCount / Shape::kCount; + }; + +private: + + static int const kElementsPerAccess = InstructionShape::kM * InstructionShape::kN / kThreads; + + /// Number of mma operations performed by a warp + using MmaIterations = MatrixShape; + /// Number of mma operations performed by the entire accumulator + using AccumulatorIterations = MatrixShape; + + /// Number of K iterations + static int const kKBlockIterations = (AccumulatorShape::kColumn + kKBlockColumn - 1) / kKBlockColumn; + 
static int const kResidualColumn = AccumulatorShape::kColumn - (kKBlockIterations - 1) * kKBlockColumn; + static int const kKBlockColumnIterations = kKBlockColumn / Shape::kColumn + * (AccumulatorShape::kRow / Shape::kRow); + static int const kResidualIndex = kResidualColumn / Shape::kColumn + * (AccumulatorShape::kRow / Shape::kRow); + +public: + + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + /// This is the fragment size produced by one access of the iterator. + using Fragment = Array; + + /// Accumulator Fragment object + using AccumulatorFragment = Array; + + /// Scale Bias Element Type + using ElementScaleBias = typename OutputOp::ElementCompute; + + /// Scale Bias Fragment object + using ScaleBiasFragment = Array; + + +private: + + /// Internal access type + using AccessType = Array; + using FragmentAccessType = Array; + + using ScaleBiasAccessType = Array; + +private: + // + // Data members + // + + /// Accumulator tile + AccessType const *accumulators_; + + /// Internal index + int index_; + + /// Used to access residual tile first + bool is_residual_tile_; + +public: + /// Constructs an iterator + CUTLASS_HOST_DEVICE + MmaTensorOpFragmentIterator(AccumulatorFragment const &accum) + : accumulators_(reinterpret_cast(&accum)), + index_(0), is_residual_tile_(true) {} + + /// Add offset + CUTLASS_HOST_DEVICE + void add_offset(int index_offset) { + index_ += index_offset; + if(is_residual_tile_ && index_ >= kKBlockColumnIterations) { + index_ = index_ - kKBlockColumnIterations + kResidualIndex; + is_residual_tile_ = false; + } + } + + /// Increments + CUTLASS_HOST_DEVICE + MmaTensorOpFragmentIterator &operator++() { + add_offset(1); + return *this; + } + + /// Decrements + CUTLASS_HOST_DEVICE + MmaTensorOpFragmentIterator &operator--() { + add_offset(-1); + return *this; + } + + /// Loads a fragment from the referenced part of the accumulator tile + CUTLASS_HOST_DEVICE + void load(Fragment &frag, OutputOp output_op) const { + + if (output_op.is_source_needed()) //beta must be zero + assert(0); + + FragmentAccessType *frag_ptr = reinterpret_cast(&frag); + + int index = index_ * MmaIterations::kCount; + + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < MmaIterations::kColumn; n++) { + for (int m = 0; m < MmaIterations::kRow; m++) { + int accumulator_access_offset = + n * AccumulatorIterations::kRow + m + index; + + frag_ptr[m * MmaIterations::kColumn + n].clear(); + if(!(is_residual_tile_ && index_ >= kResidualIndex)) + frag_ptr[m * MmaIterations::kColumn + n] = output_op(accumulators_[accumulator_access_offset]); + } + } + } + + /// Loads a fragment from the referenced part of the accumulator tile + /// Then apply per-channel scale and bias + CUTLASS_HOST_DEVICE + void load(Fragment &frag, ScaleBiasFragment &scale, + ScaleBiasFragment &bias, OutputOp output_op) const { + + if (output_op.is_source_needed()) //beta must be zero + assert(0); + + FragmentAccessType *frag_ptr = reinterpret_cast(&frag); + ScaleBiasAccessType * scale_ptr = reinterpret_cast(&scale); + ScaleBiasAccessType * bias_ptr = reinterpret_cast(&bias); + + int index = index_ * MmaIterations::kCount; + + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < MmaIterations::kColumn; n++) { + for (int m = 0; m < MmaIterations::kRow; m++) { + int accumulator_access_offset = + n * AccumulatorIterations::kRow + m + index; + + frag_ptr[m * MmaIterations::kColumn + n].clear(); + if(!(is_residual_tile_ && index_ >= kResidualIndex)) + frag_ptr[m * MmaIterations::kColumn + n] = + 
output_op(accumulators_[accumulator_access_offset], + scale_ptr[n] /*scale*/, bias_ptr[n] /*bias*/); + } + } + } + + + +}; + +// Partial specialization for row-major accumulator tile + +template < + /// Shape of warp tile to load (concept: MatrixShape) + typename Shape_, + /// Shape of the warp accumulation tile (concept: MatrixShape) + typename AccumulatorShape_, + /// KBlocks columns to compute residual + int KBlocksColumn_, + /// Accumulator Element type + typename ElementAccumulator_, + /// Element type + typename Element_, + /// Shape of one matrix product operation (concept: MatrixShape) + typename InstructionShape_, + /// Output operation on fragment + typename OutputOp_> +class MmaTensorOpFragmentIterator { + public: + + /// Shape of warp tile to load (concept: MatrixShape) + using Shape = Shape_; + + /// Shape of the warp accumulation tile (concept: MatrixShape) + using AccumulatorShape = AccumulatorShape_; + + /// KBlocks columns to compute residual + static int const kKBlockColumn = KBlocksColumn_; + + /// Accumulator Element type + using ElementAccumulator = ElementAccumulator_; + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = cutlass::layout::RowMajor; + + /// Shape of one matrix product operation (concept: MatrixShape) + using InstructionShape = InstructionShape_; + + /// Output operation on fragment + using OutputOp = OutputOp_; + + /// Number of participating threads + static int const kThreads = 32; + + /// Internal structure of iterator - made public to enable introspection + struct Policy { + static_assert( + !(Shape::kRow % InstructionShape::kM) && + !(Shape::kColumn % InstructionShape::kN), + "Shape of warp-level Mma must be divisible by operator shape."); + static_assert( + AccumulatorShape::kRow == Shape::kRow, + "Rows of Warp Accumulator must be the same as rows of warp"); + static_assert( + !(AccumulatorShape::kColumn % Shape::kColumn), + "Shape of Warp Accumulator must be divisible by warp shape."); + static_assert( + !(kKBlockColumn % Shape::kColumn), + "KBlock size must be divisible by warp shape."); + + /// Number of times this iterator can be incremented + static int const kIterations = AccumulatorShape::kCount / Shape::kCount; + }; + +private: + + static int const kRowsPerIteration = 8; + static int const kColumnsPerIteration = 16; + static int const kElementsPerIteration = kRowsPerIteration * InstructionShape::kN / kThreads; + static int const kElementsPerAccess = kRowsPerIteration * kColumnsPerIteration / kThreads; + static int const kIterationsPerAccess = kElementsPerAccess / kElementsPerIteration; + + // Number of iterations per actual instruction + static int const kIterationsPerInstruction = InstructionShape::kM / kRowsPerIteration; + + static int const kAccessStride = kIterationsPerInstruction; + + /// Number of mma operations performed by a warp + using MmaIterations = MatrixShape; + /// Number of mma operations performed by the entire accumulator + using AccumulatorIterations = MatrixShape; + + /// Number of Accesses in a warp + using AccessIterations = MatrixShape; + + /// Number of K iterations + static int const kKBlockIterations = (AccumulatorShape::kColumn + kKBlockColumn - 1) / kKBlockColumn; + static int const kResidualColumn = AccumulatorShape::kColumn - (kKBlockIterations - 1) * kKBlockColumn; + static int const kKBlockColumnIterations = kKBlockColumn / Shape::kColumn; + static int const kResidualIndex = kResidualColumn / Shape::kColumn; + +public: + + // + // Derived quantities + // + + /// 
Fragment object holding a thread's part of a tile + /// This is the fragment size produced by one access of the iterator. + using Fragment = Array; + + /// Accumulator Fragment object + using AccumulatorFragment = Array; + + /// Scale Bias Element Type + using ElementScaleBias = typename OutputOp::ElementCompute; + + /// Scale Bias Fragment object + using ScaleBiasFragment = Array; + + +private: + + /// Internal access type + using AccessType = Array; + using FragmentAccessType = Array; + using ScaleBiasAccessType = Array; + +private: + // + // Data members + // + + /// Accumulator tile + AccessType const *accumulators_; + + /// Internal index + int index_; + + /// Used to access residual tile first + bool is_residual_tile_; + +public: + /// Constructs an iterator + CUTLASS_HOST_DEVICE + MmaTensorOpFragmentIterator(AccumulatorFragment const &accum) + : accumulators_(reinterpret_cast(&accum)), + index_(0), is_residual_tile_(true) {} + + /// Add offset + CUTLASS_HOST_DEVICE + void add_offset(int index_offset) { + index_ += index_offset; + if(is_residual_tile_ && index_ >= kKBlockColumnIterations) { + index_ = index_ - kKBlockColumnIterations + kResidualIndex; + is_residual_tile_ = false; + } + } + + /// Increments + CUTLASS_HOST_DEVICE + MmaTensorOpFragmentIterator &operator++() { + add_offset(1); + return *this; + } + + /// Decrements + CUTLASS_HOST_DEVICE + MmaTensorOpFragmentIterator &operator--() { + add_offset(-1); + return *this; + } + + CUTLASS_HOST_DEVICE + void set_index(int idx) { + index_ = idx; + } + + /// Loads a fragment from the referenced part of the accumulator tile + CUTLASS_HOST_DEVICE + void load(Fragment &frag, OutputOp output_op) const { + + if (output_op.is_source_needed()) //beta must be zero + assert(0); + + FragmentAccessType *frag_ptr = reinterpret_cast(&frag); + + int index = index_ * AccessIterations::kCount; + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < AccessIterations::kCount; i++) { + + int accumulator_access_offset = index / AccessIterations::kCount * (MmaIterations::kColumn * kIterationsPerInstruction) + + (index % AccessIterations::kCount) / (AccessIterations::kColumn * kIterationsPerInstruction) * + AccumulatorIterations::kColumn * kIterationsPerInstruction + + (index % (AccessIterations::kColumn * kIterationsPerInstruction)) / kIterationsPerInstruction * + (kIterationsPerInstruction * kIterationsPerAccess) + + (index % kIterationsPerInstruction); + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < kIterationsPerAccess; j++) { + + frag_ptr[i*kIterationsPerAccess + j].clear(); + if(!(is_residual_tile_ && index_ >= kResidualIndex)) + frag_ptr[i*kIterationsPerAccess + j] = output_op(accumulators_[accumulator_access_offset + j * kAccessStride]); + } + index++; + } + } + + /// Loads a fragment from the referenced part of the accumulator tile + /// Then apply per-channel scale and bias + CUTLASS_HOST_DEVICE + void load(Fragment &frag, ScaleBiasFragment &scale, + ScaleBiasFragment & bias, OutputOp output_op) const { + + if (output_op.is_source_needed()) //beta must be zero + assert(0); + + FragmentAccessType *frag_ptr = reinterpret_cast(&frag); + ScaleBiasAccessType * scale_ptr = reinterpret_cast(&scale); + ScaleBiasAccessType * bias_ptr = reinterpret_cast(&bias); + + int index = index_ * AccessIterations::kCount; + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < AccessIterations::kCount; i++) { + + int accumulator_access_offset = index / AccessIterations::kCount * (MmaIterations::kColumn * kIterationsPerInstruction) + + (index % AccessIterations::kCount) / 
(AccessIterations::kColumn * kIterationsPerInstruction) * + AccumulatorIterations::kColumn * kIterationsPerInstruction + + (index % (AccessIterations::kColumn * kIterationsPerInstruction)) / kIterationsPerInstruction * + (kIterationsPerInstruction * kIterationsPerAccess) + + (index % kIterationsPerInstruction); + + int scale_bias_offset = (index + % (kIterationsPerInstruction * AccessIterations::kColumn)) + * kIterationsPerAccess; + + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < kIterationsPerAccess; j++) { + + + frag_ptr[i*kIterationsPerAccess + j].clear(); + if(!(is_residual_tile_ && index_ >= kResidualIndex)) + frag_ptr[i*kIterationsPerAccess + j] = output_op( + accumulators_[accumulator_access_offset + j * kAccessStride], + scale_ptr[scale_bias_offset + j], bias_ptr[scale_bias_offset + j]); + } + index++; + } + } + +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace warp +} // namespace gemm +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_policy.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_policy.h new file mode 100644 index 0000000000000000000000000000000000000000..f73ede627daf9f57974ac6e3e455f06554b127a7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_policy.h @@ -0,0 +1,65 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Policy describing implementation details of warp-level GEMM targeting Tensor Cores. 
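+
+    An illustrative instantiation (example only; assumes the SM80 16x8x16
+    half-precision arch::Mma specialization is available):
+
+      using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
+          cutlass::arch::Mma<
+              cutlass::gemm::GemmShape<16, 8, 16>,
+              32,
+              cutlass::half_t, cutlass::layout::RowMajor,
+              cutlass::half_t, cutlass::layout::ColumnMajor,
+              float, cutlass::layout::RowMajor,
+              cutlass::arch::OpMultiplyAdd>,
+          cutlass::MatrixShape<1, 1>>;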
+*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/matrix_shape.h" +#include "cutlass/gemm/gemm.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace warp { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Policy +template < + typename Operator_, ///< hardware instruction(s) performing TensorOp (concept: arch::Mma) + typename OpDelta_ ///< distance between operations (concept: MatrixShape) +> +struct MmaTensorOpPolicy { + + using Operator = Operator_; ///< hardware instruction(s) performing TensorOp (concept: arch::Mma) + using OpDelta = OpDelta_; ///< distance between operations (concept: MatrixShape) + using MmaShape = typename Operator::Shape; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace warp +} // namespace gemm +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_sm70.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_sm70.h new file mode 100644 index 0000000000000000000000000000000000000000..0a2449d7689e41e87ef4bfe67db36b3c9c017057 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_sm70.h @@ -0,0 +1,280 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Templates implementing warp-level matrix multiply-accumulate operations targeting + Tensor Cores. + + This is a work in progress. 
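+
+    MmaVoltaTensorOp composes Volta-era mma.sync instructions over interleaved
+    32x32x4 tiles (see InterleavedTileShape below); the warp-level Shape must be
+    a multiple of that interleaved tile.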
+*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/array.h" + +#include "cutlass/numeric_types.h" +#include "cutlass/matrix_shape.h" + +#include "cutlass/arch/mma.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/warp/mma.h" + +#include "cutlass/gemm/warp/mma_tensor_op_policy.h" +#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm70.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace warp { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions. +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + /// Data type of A elements + typename ElementA_, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA_, + /// Data type of B elements + typename ElementB_, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB_, + /// Element type of C matrix + typename ElementC_, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC_, + /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) + typename Policy_, + /// Used for partial specialization + typename Enable = bool +> +class MmaVoltaTensorOp { +public: + /// Shape of warp-level matrix operation (concept: GemmShape) + using Shape = Shape_; + + /// Data type of multiplicand A + using ElementA = ElementA_; + + /// Layout of multiplicand A + using LayoutA = LayoutA_; + + /// Data type of multiplicand B + using ElementB = ElementB_; + + /// Layout of multiplicand B + using LayoutB = LayoutB_; + + /// Data type of accumulator matrix C + using ElementC = ElementC_; + + /// Layout of accumulator matrix C + using LayoutC = LayoutC_; + + /// Shape of the warp in units of thread (concept: MmaLanePolicySimt) + using Policy = Policy_; + + /// Indicates class of matrix operator + using OperatorClass = arch::OpClassTensorOp; + + /// Architecture tag + using ArchTag = arch::Sm70; + + /// Underlying matrix multiply operator (concept: arch::Mma) + using ArchMmaOperator = typename Policy::Operator; + + /// Indicates math operator + using MathOperator = typename ArchMmaOperator::Operator; + + /// Underlying instruction shape + using InstructionShape = typename ArchMmaOperator::Shape; + + /// Complex transform on A operand + static ComplexTransform const kTransformA = ComplexTransform::kNone; + + /// Complex transform on B operand + static ComplexTransform const kTransformB = ComplexTransform::kNone; + + /// Number of threads participating in warp-level matrix product + static int const kThreadCount = 32; + + /// interleaved 32x32 tiles + using InterleavedTileShape = GemmShape<32, 32, 4>; + + static_assert(!(Shape::kM % InterleavedTileShape::kM) && + !(Shape::kN % InterleavedTileShape::kN), + "Shape must be a multiple of InterleavedTileShape."); +public: + + /// Iterates over the A operand in memory + using IteratorA = MmaVoltaTensorOpMultiplicandTileIterator< + MatrixShape, + Operand::kA, + ElementA, + LayoutA, + MatrixShape< + ArchMmaOperator::Shape::kM, + ArchMmaOperator::Shape::kK + >, + Policy::OpDelta::kRow, + kThreadCount + >; + + /// Storage for A tile + using FragmentA = typename IteratorA::Fragment; + + /// Iterates over the B operand in memory + using IteratorB = MmaVoltaTensorOpMultiplicandTileIterator< + MatrixShape, + Operand::kB, + ElementB, + LayoutB, + MatrixShape< + 
ArchMmaOperator::Shape::kK, + ArchMmaOperator::Shape::kN + >, + Policy::OpDelta::kRow, + kThreadCount + >; + + /// Storage for B tile + using FragmentB = typename IteratorB::Fragment; + + /// Iterates over the C operand in memory + using IteratorC = MmaVoltaTensorOpAccumulatorTileIterator< + MatrixShape, + ElementC, + LayoutC, + typename ArchMmaOperator::Shape, + typename Policy::OpDelta + >; + + /// Storage for C tile + using FragmentC = typename IteratorC::Fragment; + +private: + + static_assert( + !(Shape::kM % ArchMmaOperator::Shape::kM) && + !(Shape::kN % ArchMmaOperator::Shape::kN), + "Shape of warp-level Mma must be divisible by operator shape."); + + /// Number of mma operations performed + using MmaIterations = MatrixShape< + InterleavedTileShape::kM / ArchMmaOperator::Shape::kM, + InterleavedTileShape::kN / ArchMmaOperator::Shape::kN + >; + using TileIterations = MatrixShape< + Shape::kM / InterleavedTileShape::kM, + Shape::kN / InterleavedTileShape::kN + >; + + // Whether matrix B is reordered + bool reorder_B_; + +public: + + /// Underlying matrix multiply operator (concept: arch::Mma) + ArchMmaOperator mma; + +public: + + // + // Methods + // + + /// Ctor + CUTLASS_DEVICE + MmaVoltaTensorOp() {} + + /// Performs a warp-level matrix multiply-accumulate operation + CUTLASS_DEVICE + void operator()( + FragmentC &D, + FragmentA const &A, + FragmentB const &B, + FragmentC const &C) { + + using MmaOperandA = typename ArchMmaOperator::FragmentA; + using MmaOperandB = typename ArchMmaOperator::FragmentB; + using MmaOperandC = typename ArchMmaOperator::FragmentC; + + D = C; + + MmaOperandA const *ptr_A = reinterpret_cast(&A); + MmaOperandB const *ptr_B = reinterpret_cast(&B); + MmaOperandC *ptr_D = reinterpret_cast(&D); + + CUTLASS_PRAGMA_UNROLL + for (int outer_col = 0; outer_col < TileIterations::kColumn; ++outer_col) { + CUTLASS_PRAGMA_UNROLL + for (int inner_col = 0; inner_col < MmaIterations::kColumn; ++inner_col) { + CUTLASS_PRAGMA_UNROLL + for (int outer_row = 0; outer_row < TileIterations::kRow; ++outer_row) { + CUTLASS_PRAGMA_UNROLL + + for (int inner_row = 0; inner_row < MmaIterations::kRow; ++inner_row) { + + int op_col = inner_col + MmaIterations::kColumn * outer_col; + + // Column-major serpentine sequence to maximize reuse of A operand. 
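+                // When the logical column index is odd, the row visitation order is
+                // reversed, so the operand A fragment referenced by the last MMA of
+                // the previous column is the first one referenced in this column.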
+ int inner_row_serp = inner_row; + int outer_row_serp = outer_row; + if (op_col & 1) { + inner_row_serp = MmaIterations::kRow - inner_row - 1; + outer_row_serp = TileIterations::kRow - outer_row - 1; + } + int op_row = inner_row_serp + MmaIterations::kRow * outer_row_serp; + int op_idx = inner_row_serp + MmaIterations::kRow * + (inner_col + MmaIterations::kColumn * + (outer_row_serp + TileIterations::kRow * outer_col)); + mma( + ptr_D[op_idx], + ptr_A[op_row], + ptr_B[op_col], + ptr_D[op_idx]); + + } + } + } + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace warp +} // namespace gemm +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_tile_access_iterator.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_tile_access_iterator.h new file mode 100644 index 0000000000000000000000000000000000000000..5e4de60ab0e736da93b946cdd5dd7c17cb1dafd8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_tile_access_iterator.h @@ -0,0 +1,362 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Defines iterators used by warp-level matrix multiply operations targeting Tensor Cores. 
+*/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/array.h" +#include "cutlass/numeric_types.h" +#include "cutlass/tensor_ref.h" +#include "cutlass/matrix_shape.h" + +#include "cutlass/arch/memory_sm75.h" +#include "cutlass/gemm/gemm.h" + +#include "cutlass/layout/matrix.h" +#include "cutlass/layout/tensor.h" +#include "cutlass/layout/pitch_linear.h" +#include "cutlass/layout/tensor_op_multiplicand_sm80.h" + +#include "cutlass/platform/platform.h" +#include "cutlass/fast_math.h" + +#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace warp { + + +/// Tile access iterator +/// Each iteration acess in the tile is +/// used as multiplicand for one +/// warp-level matrix multiplication +template < + /// Size of the tile (concept: MatrixShape) + typename Shape_, + /// Operand identity + Operand Operand_, + /// Data type of A elements + typename Element_, + /// Layout of operand + typename Layout_, + /// Shape of one matrix production operation (concept: MatrixShape) + typename InstructionShape_, + /// Delta between *MMA operations (in units of *MMA operations, concept: + /// MatrixShape) + int OpDelta_, + /// Number of threads participating in one matrix operation + int Threads = 32, + /// Enable Residual Support + bool EnableResidual = false, + /// Number of partitions along K dimension + int PartitionsK_ = 1 +> +class MmaTensorOpMultiplicandTileAccessIterator { + public: + + /// Shape of tile to load (concept: MatrixShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand_; + + /// Basic check + static_assert(kOperand == Operand::kA || kOperand== Operand::kB, + "MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma."); + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = Layout_; + + /// Shape of one matrix product operation (concept: MatrixShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) + static int const kOpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Number of elements accessed per Shared Memory load + static int const kElementsPerAccess = + (sizeof_bits::value >= 32 ? 1 : 32 / sizeof_bits::value); + + using InstructionCount = MatrixShape< + Shape::kRow / InstructionShape::kRow, + Shape::kColumn / InstructionShape::kColumn + >; + + static int const kIterations = (kOperand == Operand::kA) ? + InstructionCount::kColumn : InstructionCount::kRow; + + +public: + + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = Array< + Element, + (kOperand == Operand::kA) ? 
+ (Shape::kRow * InstructionShape::kColumn / kThreads) : + (Shape::kColumn * InstructionShape::kRow / kThreads) + >; + + /// Memory access type + using AccessType = AlignedArray; + +private: + + /// Underlying tensor reference + TensorRef ref_; + + /// Extent of tensor + MatrixCoord extent_; + + /// Origin + MatrixCoord origin_; + + /// Used to load residual tile + bool is_residual_; + + /// residual offset of each thread + TensorCoord residual_offset_; + + /// Iterations in a tile + int iterations_; + +public: + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileAccessIterator( + TensorRef const &ref, + TensorCoord extent, + int lane_id + ): ref_(ref), extent_(extent), is_residual_(false), iterations_(0) { + + if (kOperand == Operand::kA) { + origin_ = MatrixCoord(lane_id / 4, (lane_id % 4) * kElementsPerAccess); + } + else { + origin_ = MatrixCoord((lane_id % 4) * kElementsPerAccess, lane_id / 4); + } + + ref_.add_coord_offset(origin_); + + if(EnableResidual) { + // compute residual offset + if (kOperand == Operand::kA) { + typename TensorCoord::Index residual_size = + extent_.column() % Shape::kColumn; + if(residual_size) { + is_residual_ = true; + residual_offset_ = make_Coord(0, residual_size); + } + } + else { + typename TensorCoord::Index residual_size = + extent_.row() % Shape::kRow; + if(residual_size) { + is_residual_ = true; + residual_offset_ = make_Coord(residual_size, 0); + } + } + } + } + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileAccessIterator( + TensorRef const &ref, + int lane_id + ): MmaTensorOpMultiplicandTileAccessIterator(ref, + {Shape::kRow, Shape::kColumn}, lane_id) { + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileAccessIterator &add_tile_offset(TensorCoord const &tile_offset) { + + TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn); + origin_ += coord_offset; + + ref_.add_coord_offset(coord_offset); + + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_DEVICE + void advance() { + + if(EnableResidual && is_residual_) { + is_residual_ = false; + + origin_ += residual_offset_; + ref_.add_coord_offset(residual_offset_); + + } + + else { + if (kOperand == Operand::kA) { + add_tile_offset({0, 1}); + } + else { + add_tile_offset({1, 0}); + } + } + + iterations_ = 0; + } + + /// increase iterations in a tile + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileAccessIterator & operator++() { + + iterations_++; + + if(iterations_ >= kIterations) + advance(); + + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + + int const kWarpShapeDivisibleInner = + (kOperand == Operand::kA ? 
InstructionShape::kColumn : InstructionShape::kRow); + + // Take advantage of Tensor Op's 8 x 4T access pattern + int const kAccessesInner = (kWarpShapeDivisibleInner / kElementsPerAccess) / 4; + + AccessType *access_ptr = reinterpret_cast(&frag); + + if (kOperand == Operand::kA) { + int const kTilesPerInstruction = InstructionShape::kRow / 8; + + CUTLASS_PRAGMA_UNROLL + for (int inst_m_idx = 0; inst_m_idx < InstructionCount::kRow; ++inst_m_idx) { + + CUTLASS_PRAGMA_UNROLL + for (int inner_idx = 0; inner_idx < kAccessesInner; ++inner_idx) { + + CUTLASS_PRAGMA_UNROLL + for (int access_m_idx = 0; access_m_idx < kTilesPerInstruction; ++access_m_idx) { + int access_idx = + access_m_idx + kTilesPerInstruction * (inner_idx + kAccessesInner * inst_m_idx); + + MatrixCoord offset( + access_m_idx * 8 + inst_m_idx * InstructionShape::kRow, + inner_idx * 4 * kElementsPerAccess + iterations_ * InstructionShape::kColumn); + + MatrixCoord access_coord = origin_ + offset; + +// if(access_coord.row() < extent_.row() && access_coord.column() < extent_.column()) { + + access_ptr[access_idx] = *reinterpret_cast( + ref_.data() + ref_.offset(offset)); +// } +// else { +// AccessType zero; +// zero.clear(); +// access_ptr[access_idx] = zero; +// } + } + } + } + } + else { + CUTLASS_PRAGMA_UNROLL + for (int inst_n_idx = 0; inst_n_idx < InstructionCount::kColumn; ++inst_n_idx) { + + CUTLASS_PRAGMA_UNROLL + for (int inner_idx = 0; inner_idx < kAccessesInner; ++inner_idx) { + int access_idx = inner_idx + kAccessesInner * inst_n_idx; + + MatrixCoord offset( + inner_idx * 4 * kElementsPerAccess + iterations_ * InstructionShape::kRow, + inst_n_idx * 8); + + MatrixCoord access_coord = origin_ + offset; + +// if(access_coord.row() < extent_.row() && access_coord.column() < extent_.column()) { + + access_ptr[access_idx] = *reinterpret_cast( + ref_.data() + ref_.offset(offset)); +// } +// else { +// AccessType zero; +// zero.clear(); +// access_ptr[access_idx] = zero; +// } + } + } + } + } + +}; + + + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace warp +} // namespace gemm +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_tile_iterator.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_tile_iterator.h new file mode 100644 index 0000000000000000000000000000000000000000..ac042cbc71430e67162544239f62914add8dfa24 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_tile_iterator.h @@ -0,0 +1,3974 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Defines iterators used by warp-level matrix multiply operations targeting Tensor Cores. +*/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/array.h" +#include "cutlass/numeric_types.h" +#include "cutlass/tensor_ref.h" +#include "cutlass/matrix_shape.h" + +#include "cutlass/arch/memory_sm75.h" +#include "cutlass/gemm/gemm.h" + +#include "cutlass/layout/matrix.h" +#include "cutlass/layout/tensor.h" +#include "cutlass/layout/pitch_linear.h" +#include "cutlass/layout/tensor_op_multiplicand_sm75.h" + +#include "cutlass/platform/platform.h" +#include "cutlass/fast_math.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace warp { + +//////////////////////////////////////////////////////////////////////////////// + +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Operand identity + Operand Operand, + /// Data type of A elements + typename Element_, + /// Layout of operand + typename Layout_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Delta between *MMA operations (in units of *MMA operations, concept: + /// MatrixShape) + int OpDelta_, + /// Number of threads participating in one matrix operation + int Threads, + /// Number of partitions along K dimension + int PartitionsK_ = 1> +class MmaTensorOpMultiplicandTileIterator; + +//////////////////////////////////////////////////////////////////////////////// + +/// This tile iterator is specialized for 32-thread TensorOps. It uses LDSM to load from shared +/// memory and therefore must be initialized with a TensorRef to shared memory. 
+/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: PitchLinearShape) + typename Shape_, + /// Identifies A or B multiplicand + Operand Operand_, + /// Data type of elements + typename Element_, + /// Shape of one matrix product operation (concept: PitchLinearShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions) + int OpDelta_, + /// Number of partitions along K dimension + int PartitionsK_> +class MmaTensorOpMultiplicandTileIterator< + Shape_, Operand_, Element_, + cutlass::layout::TensorOpMultiplicandCongruous::value, + 64>, + InstructionShape_, OpDelta_, 32, PartitionsK_> { + public: + + /// Shape of tile to load (concept: PitchLinearShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand_; + + static_assert(kOperand == Operand::kA || kOperand== Operand::kB, + "MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma."); + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = cutlass::layout::TensorOpMultiplicandCongruous< + sizeof_bits::value, 64>; + + /// Shape of one matrix product operation (concept: GemmShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) + static int const kOpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// Number of partitions along K dimension + static int const kPartitionsK = PartitionsK_; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Long Index type + using StrideIndex = typename TensorRef::Layout::Stride::Index; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Internal structure of iterator - made public to enable introspection + struct Policy { + static_assert( + !(Shape::kContiguous % InstructionShape::kContiguous), + "Shape of warp-level Mma must be divisible by operator shape."); + + // Determine number of elements along outer dimension per individual LDSM op + static int const kLdsmOpOuter = Layout::kElementsPerAccess; + static int const kLdsmOpInner = 8; + + static_assert(!(Shape::kContiguous % kLdsmOpOuter), + "Shape of warp-level mma must be divisible by LDSM's fundamental tile size."); + + static_assert(!(Shape::kStrided % kLdsmOpInner), + "Shape of warp-level mma must be divisible by LDSM's fundamental tile size."); + + /// Shape of one individual LDSM instruction + static int const LdsmShapeStrided = + InstructionShape::kStrided / kLdsmOpInner; + static int const LdsmShapeContiguous = 4 / LdsmShapeStrided; + using LdsmShape = + layout::PitchLinearShape; + + /// Number and arrangement of LDSM instructions + using LdsmIterations = layout::PitchLinearShape< + Shape::kContiguous / Layout::kElementsPerAccess / LdsmShapeContiguous, + 1>; + + /// Number of groups for each tile + static int const kGroupsPerTile = + Shape::kStrided / InstructionShape::kStrided; + }; + +private: + + /// Not working on this feature at the moment. 
+ static_assert(kOpDelta == 1, + "Alternative arrangements not supported at present."); + + /// Number of internal pointers needed to reference shared memory + static int const kPointerCount = + Layout::TileShape::kContiguous / Policy::LdsmShape::kContiguous; + + /// Pointer type used for accesses + using AccessType = Array; + + /// Internal counter used to jump to next K partition + int k_group_idx_; + +public: + + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = + Array; + +private: + + /// Layout object storing stride values + StrideIndex stride_; + + /// Shared memory base pointers - not advanced + AccessType const *pointer_[kPointerCount]; + + /// Byte offset incremented as iterator advances + Index byte_offset_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0) { } + + /// Constructor from TensorRef + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator( + TensorRef const &ref, + int lane_id + ): + stride_(ref.stride(0) / Layout::kElementsPerAccess), byte_offset_(0), + k_group_idx_(0) { + + int quad_pair = (lane_id >> 3); + int quad_quad = (lane_id >> 4); + int lane_in_quad = (lane_id & 3); + int lane_in_quad_pair = (lane_id & 7); + int lane_in_quad_quad = (lane_id & 15); + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kPointerCount; ++i) { + int partition_contiguous_idx = -1; + int access_contiguous_idx = -1; + int access_strided_idx = -1; + + if (Policy::LdsmShape::kContiguous == 4) { + // Matrix multiply 1688 A/B + // Q0 Q1 Q2 Q3 (Q stands for 1 8x128bit block). + // Four blocks are next to each other in the contiguous dimension. + partition_contiguous_idx = ((lane_in_quad_pair >> 2) ^ i); + access_contiguous_idx = (quad_pair ^ lane_in_quad); + access_strided_idx = lane_in_quad_pair; + } + else if (Policy::LdsmShape::kContiguous == 2 && + kOperand == Operand::kA) { + // Matrix multiply 16816 A + // Q0 Q1 + // Q2 Q3 + partition_contiguous_idx = ((lane_in_quad_pair >> 2) ^ (i >> 1)); + access_contiguous_idx = + (((quad_pair & 1) + ((i & 1) << 1)) ^ lane_in_quad); + access_strided_idx = lane_in_quad_pair + (lane_id >> 4 << 3); + } else if (Policy::LdsmShape::kContiguous == 2 && + kOperand == Operand::kB) { + // Matrix multiply 16816 B + // Q0 Q2 + // Q1 Q3 + partition_contiguous_idx = ((lane_in_quad_pair >> 2) ^ (i >> 1)); + access_contiguous_idx = ((quad_quad + ((i & 1) << 1)) ^ lane_in_quad); + access_strided_idx = lane_in_quad_quad; + } else if (Policy::LdsmShape::kContiguous == 1) { + // Matrix multiply 16832.SP B + // Q0 + // Q1 + // Q2 + // Q3 + partition_contiguous_idx = ((lane_in_quad_pair >> 2) ^ (i >> 2)); + access_contiguous_idx = ((i & 3) ^ lane_in_quad); + access_strided_idx = lane_id; + } + + int access_contiguous = + partition_contiguous_idx * Layout::PartitionShape::kContiguous + + access_contiguous_idx; + + int access_strided = access_strided_idx; + + pointer_[i] = reinterpret_cast(ref.data()) + + access_contiguous + access_strided * stride_; + } + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { + + byte_offset_ += offset * sizeof(Element); + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { + + int contiguous_offset = 
tile_offset.contiguous(); + if (Shape::kContiguous == + Layout::PartitionShape::kContiguous * Layout::kElementsPerAccess) { + if (tile_offset.contiguous() % 2) { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kPointerCount / 2; ++i) { + AccessType const *tmp_pointer = pointer_[i]; + pointer_[i] = pointer_[i + kPointerCount / 2]; + pointer_[i + kPointerCount / 2] = tmp_pointer; + } + } + contiguous_offset = (tile_offset.contiguous() >> 1) << 1; + } + + int offset = (tile_offset.strided() * InstructionShape::kStrided) * + stride_ * Layout::kElementsPerAccess + + contiguous_offset * Shape::kContiguous; + + add_pointer_offset(offset); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator++() { + + add_tile_offset({0, 1}); + + if (kPartitionsK > 1) { + ++k_group_idx_; + // Jump to next stage + if (k_group_idx_ == Policy::kGroupsPerTile) { + k_group_idx_ = 0; + add_tile_offset( + {0, ((kPartitionsK - 1) * Policy::kGroupsPerTile)}); + } + } + + return *this; + } + + /// Advances the iterator along the opposite of the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator & operator--() { + byte_offset_ -= stride_ * InstructionShape::kStrided * sizeof(Element) * + Layout::kElementsPerAccess; + + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { + add_tile_offset(tile_offset); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { + add_tile_offset(-tile_offset); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + + load_with_byte_offset(frag, 0); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset in units of bytes + Index byte_offset) const { + + Array *fetch_ptr = + reinterpret_cast *>(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < Policy::LdsmIterations::kStrided; ++s) { + + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < Policy::LdsmIterations::kContiguous; ++c) { + + int access_idx = c + s * Policy::LdsmIterations::kContiguous; + + AccessType const *source_ptr = + pointer_[c % kPointerCount] + + Layout::TileShape::kContiguous * (c / kPointerCount) + + Policy::kLdsmOpInner * Policy::LdsmShape::kStrided * s * stride_; + + char const *source_byte_ptr = reinterpret_cast(source_ptr) + byte_offset + byte_offset_; + + cutlass::arch::ldsm( + fetch_ptr[access_idx], + source_byte_ptr + ); + } + } + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index pointer_offset) const { + load_with_byte_offset(frag, pointer_offset * sizeof(Element)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. 
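+  /// The tile offset is scaled to a pointer offset internally and forwarded to
+  /// load_with_byte_offset().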
+ CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset) const { + load_with_byte_offset(frag, tile_offset, 0); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index byte_offset) const { + Index pointer_offset = + tile_offset.contiguous() * Shape::kContiguous / Layout::kElementsPerAccess + + tile_offset.strided() * InstructionShape::kStrided * stride_; + + byte_offset += sizeof(AccessType) * pointer_offset; + + load_with_byte_offset(frag, byte_offset); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. + CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + // no op + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// This tile iterator is specialized for 32-thread MMA.TF32 NT TensorOps. It +/// uses LDS.32 to load from shared memory and therefore must be initialized +/// with a TensorRef to shared memory. 
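+/// Each thread issues scalar 32-bit loads (kElementsPerAccess == 1) rather than
+/// LDSM instructions, so no vectorized access type is required.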
+/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: PitchLinearShape) + typename Shape_, + /// Identifies A or B multiplicand + Operand Operand_, + /// Data type of elements + typename Element_, + /// Shape of one matrix product operation (concept: PitchLinearShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions) + int OpDelta_, + /// Number of partitions along K dimension + int PartitionsK_> +class MmaTensorOpMultiplicandTileIterator< + Shape_, Operand_, Element_, + cutlass::layout::TensorOpMultiplicandCongruous<32, 32>, InstructionShape_, + OpDelta_, 32, PartitionsK_> { + public: + /// Shape of tile to load (concept: PitchLinearShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand_; + + static_assert(kOperand == Operand::kA || kOperand == Operand::kB, + "MmaTensorOpMultiplicandIterator may only be instantiated for " + "A or B operands to warp-level Mma."); + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = cutlass::layout::TensorOpMultiplicandCongruous<32, 32>; + + /// Shape of one matrix product operation (concept: GemmShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: + /// MatrixShape) + static int const kOpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// Number of partitions along K dimension + static int const kPartitionsK = PartitionsK_; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Long Index type + using StrideIndex = typename TensorRef::Layout::Stride::Index; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Internal structure of iterator - made public to enable introspection + struct Policy { + static_assert( + !(Shape::kContiguous % InstructionShape::kContiguous), + "Shape of warp-level Mma must be divisible by operator shape."); + + // Determine number of elements along outer dimension per individual 32bit + // shared memory load op. 
Every one warp of 32bit shared memory load loads + // 8x4 elements + static int const kLdsOpInner = Layout::TileShape::kStrided; + static int const kLdsOpOuter = kThreads / kLdsOpInner; + + static_assert(!(Shape::kContiguous % kLdsOpOuter), + "Shape of warp-level mma must be divisible by 32bit " + "fundamental tile size."); + + static_assert(!(Shape::kStrided % kLdsOpInner), + "Shape of warp-level mma must be divisible by 32bit " + "fundamental tile size."); + + /// Number of 32 bit shared memory load instructions needed by one MMA instruction + /// 1688 A 2x2 + /// 1688 B 1x2 + /// 16816 B 1x4 + static int const LdsShapeContiguous = + InstructionShape::kContiguous / kLdsOpOuter; + static int const LdsShapeStrided = InstructionShape::kStrided / kLdsOpInner; + using LdsShape = + layout::PitchLinearShape; + + /// Number and arrangement of LDS instructions + using LdsIterations = layout::PitchLinearShape< + Shape::kContiguous / LdsShapeContiguous / kLdsOpOuter, 1>; + + /// Number of groups for each tile + static int const kGroupsPerTile = + Shape::kStrided / InstructionShape::kStrided; + }; + + private: + /// Not working on this feature at the moment. + static_assert(kOpDelta == 1, + "Alternative arrangements not supported at present."); + + /// Number of internal pointers needed to reference shared memory + static int const kPointerCount = Layout::TileShape::kContiguous * + Layout::kElementsPerAccess / + Policy::kLdsOpOuter; + + /// Vectorized access is not used + static int const kElementsPerAccess = 1; + + /// Pointer type used for accesses + using AccessType = Element; + + /// Internal counter used to jump to next K partition + int k_group_idx_; + + public: + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = + Array; + + private: + /// Layout object storing stride values + StrideIndex stride_; + + /// Shared memory base pointers - not advanced + AccessType const *pointer_[kPointerCount]; + + /// Byte offset incremented as iterator advances + Index byte_offset_; + + public: + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator() : stride_(0), byte_offset_(0) {} + + /// Constructor from TensorRef + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator(TensorRef const &ref, int lane_id) + : stride_(ref.stride(0)), byte_offset_(0), k_group_idx_(0) { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kPointerCount; ++i) { + int access_strided = lane_id % Policy::kLdsOpInner; + int access_contiguous = (lane_id / Policy::kLdsOpInner) + + (access_strided ^ i) * Policy::kLdsOpOuter; + + pointer_[i] = reinterpret_cast(ref.data()) + + access_contiguous + access_strided * stride_; + } + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { + byte_offset_ += offset * sizeof(Element); + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole + /// tiles + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &add_tile_offset( + TensorCoord const &tile_offset) { + int contiguous_offset = tile_offset.contiguous(); + if (Shape::kContiguous == + Layout::TileShape::kContiguous * Layout::kElementsPerAccess / 2) { + if (tile_offset.contiguous() % 2) { + // Matrix multiply 1688 pointer_[0] <=> pointer_[4] pointer_[1] <=> pointer_[5] + // pointer_[2] <=> pointer_[6] pointer_[3] <=> pointer_[7] + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < 
kPointerCount / 2; ++i) { + AccessType const *tmp_pointer = pointer_[i]; + pointer_[i] = pointer_[i + kPointerCount / 2]; + pointer_[i + kPointerCount / 2] = tmp_pointer; + } + } + contiguous_offset = (tile_offset.contiguous() >> 1) << 1; + } + + int offset = (tile_offset.strided() * InstructionShape::kStrided) * stride_ + + contiguous_offset * Shape::kContiguous; + + add_pointer_offset(offset); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator &operator++() { + add_tile_offset({0, 1}); + + if (kPartitionsK > 1) { + ++k_group_idx_; + // Jump to next stage + if (k_group_idx_ == Policy::kGroupsPerTile) { + k_group_idx_ = 0; + add_tile_offset( + {0, ((kPartitionsK - 1) * Policy::kGroupsPerTile)}); + } + } + + return *this; + } + + /// Advances the iterator along the opposite of the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &operator--() { + byte_offset_ -= stride_ * InstructionShape::kStrided * sizeof(Element) * + kElementsPerAccess; + + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of + ///< the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator &operator+=( + TensorCoord const &tile_offset) { + add_tile_offset(tile_offset); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of + ///< the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator &operator-=( + TensorCoord const &tile_offset) { + add_tile_offset(-tile_offset); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { load_with_byte_offset(frag, 0); } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset in units of bytes + Index byte_offset) const { + Element *fetch_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < Policy::LdsIterations::kStrided; ++s) { + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < Policy::LdsIterations::kContiguous; ++c) { + CUTLASS_PRAGMA_UNROLL + for (int ss = 0; ss < Policy::LdsShape::kStrided; ++ss) { + CUTLASS_PRAGMA_UNROLL + for (int cc = 0; cc < Policy::LdsShape::kContiguous; ++cc) { + int access_idx = + cc + (ss + (c + s * Policy::LdsIterations::kContiguous) * + Policy::LdsShape::kStrided) * + Policy::LdsShape::kContiguous; + int access_idx_contiguous = cc + c * Policy::LdsShape::kContiguous; + int access_idx_strided = + (ss + s * Policy::LdsShape::kStrided) * Policy::kLdsOpInner; + + AccessType const *source_ptr = + pointer_[access_idx_contiguous % kPointerCount] + + Layout::TileShape::kContiguous * Layout::kElementsPerAccess * + (access_idx_contiguous / kPointerCount) + + access_idx_strided * stride_; + + char const *source_byte_ptr = + reinterpret_cast(source_ptr) + byte_offset + + byte_offset_; + + fetch_ptr[access_idx] = + *reinterpret_cast(source_byte_ptr); + } + } + } + } + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index pointer_offset) const { + load_with_byte_offset(frag, pointer_offset * sizeof(Element)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. 
+ CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset) const { + load_with_byte_offset(frag, tile_offset, 0); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index byte_offset) const { + Index pointer_offset = + tile_offset.contiguous() * Shape::kContiguous / + Layout::kElementsPerAccess + + tile_offset.strided() * InstructionShape::kStrided * stride_; + + byte_offset += sizeof(AccessType) * pointer_offset; + + load_with_byte_offset(frag, byte_offset); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. + CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + // no op + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// This tile iterator is specialized for 32-thread TensorOps. It uses LDSM to load from shared +/// memory and therefore must be initialized with a TensorRef to shared memory. 
+/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Identifies A or B multiplicand + Operand Operand_, + /// Data type of elements + typename Element_, + /// Shape of one matrix product operation (concept: MatrixShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions) + int OpDelta_, + /// Number of partitions along K dimension + int PartitionsK_> +class MmaTensorOpMultiplicandTileIterator< + Shape_, Operand_, Element_, + cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< + sizeof_bits::value, int(128 / sizeof(Element_))>, + InstructionShape_, OpDelta_, 32, PartitionsK_> { + public: + + /// Shape of tile to load (concept: PitchLinearShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand_; + + static_assert(kOperand == Operand::kA, + "MmaTensorOpMultiplicandIterator for ColumnMajor Congruous may " + "only be instantiated for A operand to warp-level Mma."); + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< + sizeof_bits::value, int(128 / sizeof(Element_))>; + + /// Shape of one matrix product operation (concept: MatrixShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) + static int const kOpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Long Index type + using StrideIndex = typename TensorRef::Layout::Stride::Index; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Underlying tile iterator implementation + using Base = MmaTensorOpMultiplicandTileIterator< + layout::PitchLinearShape, kOperand, Element, + layout::TensorOpMultiplicandCongruous::value, + int(128 / sizeof(Element_))>, + layout::PitchLinearShape, + kOpDelta, kThreads, PartitionsK_>; + + public: + + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = typename Base::Fragment; + +private: + + /// Underlying tile iterator + Base iterator_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator() { } + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator( + TensorRef const &ref, + int lane_id + ): iterator_({ref.data(), ref.stride()}, lane_id) { + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { + + iterator_.add_pointer_offset(offset); + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { + + iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()}); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator & operator++() { + + 
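+    // Delegates to the underlying pitch-linear tile iterator, which advances
+    // along its strided (k) dimension.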
++iterator_; + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator & operator--() { + + --iterator_; + + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { + add_tile_offset(PitchLinearCoord(tile_offset.row(), tile_offset.column())); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { + add_tile_offset(-PitchLinearCoord(tile_offset.row(), tile_offset.column())); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + + iterator_.load(frag); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index pointer_offset) const { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index byte_offset) const { + iterator_.load_with_byte_offset(frag, byte_offset); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset) const { + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index byte_offset) const { + iterator_.load_with_byte_offset( + frag, + {tile_offset.contiguous(), tile_offset.strided()}, + byte_offset); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. + CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + iterator_.set_kgroup_index(k_group); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// This tile iterator is specialized for 32-thread TensorOps. It uses LDSM to load from shared +/// memory and therefore must be initialized with a TensorRef to shared memory. 
+/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Identifies A or B multiplicand + Operand Operand_, + /// Data type of elements + typename Element_, + /// Shape of one matrix product operation (concept: MatrixShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions) + int OpDelta_, + /// Number of partitions along K dimension + int PartitionsK_> +class MmaTensorOpMultiplicandTileIterator< + Shape_, Operand_, Element_, + cutlass::layout::RowMajorTensorOpMultiplicandCongruous< + sizeof_bits::value, int(128 / sizeof(Element_))>, + InstructionShape_, OpDelta_, 32, PartitionsK_> { + public: + + /// Shape of tile to load (concept: PitchLinearShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand_; + + static_assert(kOperand == Operand::kB, + "MmaTensorOpMultiplicandIterator for RowMajor Congruous may " + "only be instantiated for B operand to warp-level Mma."); + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< + sizeof_bits::value, int(128 / sizeof(Element_))>; + + /// Shape of one matrix product operation (concept: MatrixShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) + static int const kOpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Underlying tile iterator implementation + using Base = MmaTensorOpMultiplicandTileIterator< + layout::PitchLinearShape, kOperand, Element, + layout::TensorOpMultiplicandCongruous::value, + int(128 / sizeof(Element_))>, + layout::PitchLinearShape, + kOpDelta, kThreads, PartitionsK_>; + + public: + + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = typename Base::Fragment; + +private: + + /// Underlying tile iterator + Base iterator_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator() { } + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator( + TensorRef const &ref, + int lane_id + ): iterator_({ref.data(), ref.stride()}, lane_id) { + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { + + iterator_.add_pointer_offset(offset); + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { + + iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()}); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator & operator++() { + + ++iterator_; + + return *this; + } + + /// Advances the iterator along the advance dimension + 
CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator & operator--() { + + --iterator_; + + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { + add_tile_offset(PitchLinearCoord(tile_offset.column(), tile_offset.row())); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { + add_tile_offset(-PitchLinearCoord(tile_offset.column(), tile_offset.row())); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + + iterator_.load(frag); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index pointer_offset) const { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index byte_offset) const { + iterator_.load_with_byte_offset(frag, byte_offset); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset) const { + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index byte_offset) const { + iterator_.load_with_byte_offset( + frag, + {tile_offset.strided(), tile_offset.contiguous()}, + byte_offset); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. + CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + iterator_.set_kgroup_index(k_group); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// This tile iterator is specialized for 32-thread TensorOps. It uses LDSM to +/// load from shared memory and therefore must be initialized with a TensorRef +/// to shared memory. 
+/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: PitchLinearShape) + typename Shape_, + /// Identifies A or B multiplicand + Operand Operand_, + /// Data type of elements + typename Element_, + /// Shape of one matrix product operation (concept: PitchLinearShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions) + int OpDelta_, + /// Element number when the layout crosses (in units of elements) + int Crosswise, + /// Number of partitions along K dimension + int PartitionsK_> +class MmaTensorOpMultiplicandTileIterator< + Shape_, Operand_, Element_, + cutlass::layout::TensorOpMultiplicandCrosswise::value, + Crosswise>, + InstructionShape_, OpDelta_, 32, PartitionsK_> { + public: + /// Shape of tile to load (concept: PitchLinearShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand_; + + static_assert(kOperand == Operand::kA || kOperand == Operand::kB, + "MmaTensorOpMultiplicandIterator may only be instantiated for " + "A or B operands to warp-level Mma."); + + /// Element type + using Element = Element_; + + /// Element number when the layout crosses + static int const kCrosswise = Crosswise; + + /// Layout of source tile + using Layout = cutlass::layout::TensorOpMultiplicandCrosswise< + sizeof_bits::value, kCrosswise>; + + /// Shape of one matrix product operation (concept: GemmShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: + /// MatrixShape) + static int const kOpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// Number of partitions along K dimension + static int const kPartitionsK = PartitionsK_; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Long Index type + using StrideIndex = typename TensorRef::Layout::Stride::Index; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Internal structure of iterator - made public to enable introspection + struct Policy { + static_assert( + !(Shape::kContiguous % InstructionShape::kContiguous), + "Shape of warp-level Mma must be divisible by operator shape."); + + // Determine number of elements along outer dimension per individual LDSM op + static int const kLdsmOpOuter = Layout::kElementsPerAccess; + static int const kLdsmOpInner = 8; + + static_assert(!(Shape::kContiguous % kLdsmOpOuter), + "Shape of warp-level mma must be divisible by LDSM's " + "fundamental tile size."); + + static_assert(!(Shape::kStrided % kLdsmOpInner), + "Shape of warp-level mma must be divisible by LDSM's " + "fundamental tile size."); + + /// Shape of one individual LDSM instruction + static int const LdsmShapeContiguous = + InstructionShape::kContiguous / kLdsmOpOuter; + static int const LdsmShapeStrided = + ((4 / LdsmShapeContiguous * kLdsmOpInner) > Shape::kStrided) + ? 
(Shape::kStrided / kLdsmOpInner)
+            : (4 / LdsmShapeContiguous);
+    using LdsmShape =
+        layout::PitchLinearShape<LdsmShapeContiguous, LdsmShapeStrided>;
+
+    /// Number and arrangement of LDSM instructions
+    using LdsmIterations =
+        layout::PitchLinearShape<1, Shape::kStrided / kLdsmOpInner /
+                                        LdsmShape::kStrided>;
+
+    ///
+    static int const kGroupsPerTile = Layout::TileShape::kContiguous /
+                                      Layout::kFactor / LdsmShape::kContiguous;
+  };
+
+ private:
+  /// Not working on this feature at the moment.
+  static_assert(kOpDelta == 1,
+                "Alternative arrangements not supported at present.");
+
+  /// Pointer type used for accesses
+  using AccessType = Array<Element, Layout::kElementsPerAccess>;
+
+ public:
+  //
+  // Derived quantities
+  //
+
+  /// Fragment object holding a thread's part of a tile
+  using Fragment =
+      Array<Element, Shape::kStrided * InstructionShape::kContiguous / kThreads>;
+
+ private:
+
+  /// Total number of sections. The memory is divided into stages. One stage
+  /// can store one tile. Stage is divided into sections. Interleaved layout
+  /// can have multiple sections in a stage. The rest layout only has one section
+  /// in a stage.
+  int sections_;
+
+  /// Layout object storing stride values
+  StrideIndex stride_;
+
+  /// Shared memory base pointers - not advanced
+  AccessType const *pointer_;
+
+  /// Byte offset incremented as iterator advances
+  Index byte_offset_;
+
+  /// Internal counter used to determine when to increment byte offset and when
+  /// to XOR it
+  int k_group_idx_;
+
+ public:
+  /// Default ctor constructs null iterator
+  CUTLASS_HOST_DEVICE
+  MmaTensorOpMultiplicandTileIterator()
+      : pointer_(nullptr),
+        sections_(0),
+        stride_(0),
+        byte_offset_(0),
+        k_group_idx_(0) {}
+
+  /// Constructor from TensorRef
+  CUTLASS_DEVICE
+  MmaTensorOpMultiplicandTileIterator(TensorRef const &ref, int lane_id)
+      : pointer_(reinterpret_cast<AccessType const *>(ref.data())),
+        sections_(ref.stride(0) / kCrosswise),
+        // stride_ = kCrosswise x sections_ x kFactor
+        stride_(ref.stride(0) * Layout::kFactor / Layout::kElementsPerAccess),
+        byte_offset_(0),
+        k_group_idx_(0) {
+    // Warp level iterator at most use double buffer to hide latency. If there
+    // are more than 2 sections, every stage should have more than 1 section.
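+    //
+    // Units sketch (illustrative numbers only, not part of the original
+    // comments): assuming 16-bit elements and kCrosswise = 32, one access is
+    // Layout::kElementsPerAccess = 128 / 16 = 8 elements, so a shared-memory
+    // leading stride of ref.stride(0) = 64 elements would give
+    //   sections_ = 64 / 32 = 2 sections per stage, and
+    //   stride_   = 64 * Layout::kFactor / 8 in 128-bit AccessType units.
+    // pointer_ advances in AccessType units; byte_offset_ is kept in bytes.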
+ + // Turing silicon requires all 32 threads in a warp provide valid addresses + // even for LDSM.1 and LDSM.2 +#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ == 750)) + lane_id = lane_id % (Policy::LdsmShape::kCount * Policy::kLdsmOpInner); +#endif + + int quad_quad = (lane_id >> 4); + int quad_pair = (lane_id >> 3); + int lane_in_pair = (lane_id & 1); + int lane_in_quad = (lane_id & 3); + int lane_in_quad_pair = (lane_id & 7); + int lane_in_quad_quad = (lane_id & 15); + + int partition_contiguous_idx = -1; + int access_contiguous_idx = -1; + int access_strided_idx = -1; + + if (Layout::kFactor == 4) { + // Super Integer matrix multiply Interleaved-32 + + int factor_in_partition = + (Layout::PartitionShape::kContiguous * Layout::kFactor / + Layout::TileShape::kContiguous); + + if (Policy::LdsmShape::kStrided == Policy::LdsmShape::kCount) { + // Integer matrix multiply 8816 A/B + partition_contiguous_idx = lane_in_quad / factor_in_partition; + access_contiguous_idx = ((lane_in_pair * factor_in_partition) ^ + (lane_in_quad_quad / Layout::kFactor)); + access_strided_idx = lane_id / Layout::kFactor; + } + else if (Policy::LdsmShape::kStrided == + (Policy::LdsmShape::kCount / 2) && + kOperand == Operand::kA) { + // Integer matrix multiply 16832 A + partition_contiguous_idx = lane_in_quad / factor_in_partition; + access_strided_idx = lane_in_quad_quad / Layout::kFactor; + access_contiguous_idx = + ((lane_in_pair * factor_in_partition + quad_quad) ^ + access_strided_idx); + } + else if (Policy::LdsmShape::kStrided == + (Policy::LdsmShape::kCount / 2) && + kOperand == Operand::kB) { + // Integer matrix multiply 16832 B + partition_contiguous_idx = lane_in_quad / factor_in_partition; + access_strided_idx = lane_in_quad_pair / Layout::kFactor + quad_quad * 2; + access_contiguous_idx = + ((lane_in_pair * factor_in_partition + ((lane_id & 8) >> 3)) ^ + access_strided_idx); + } + } else if (Layout::kFactor == 2) { + // Super Matrix multiply kBlock = 32 + if (Policy::LdsmShape::kStrided == Policy::LdsmShape::kCount) { + // Matrix multiply 1688 A/B + // (Q stands for 1 8x128bit block). + // Q0 + // Q1 + // Q2 + // Q3 + // Four blocks are next to each other in the strided dimension. 
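+        // Worked example (hypothetical lane, for illustration only): with
+        // Layout::kFactor == 2 and lane_id = 13 (so lane_in_quad_pair = 5),
+        // the assignments below give
+        //   partition_contiguous_idx = 13 % 2 = 1
+        //   access_contiguous_idx    = 5 / 2  = 2
+        //   access_strided_idx       = 13 / 2 = 6
+        // i.e. even/odd lanes land in the two contiguous partitions and each
+        // pair of lanes shares one strided row of the swizzled Q blocks.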
+ partition_contiguous_idx = (lane_id % Layout::kFactor); + access_contiguous_idx = (lane_in_quad_pair / Layout::kFactor); + access_strided_idx = lane_id / Layout::kFactor; + } + else if (Policy::LdsmShape::kStrided == + (Policy::LdsmShape::kCount / 2) && + kOperand == Operand::kA) { + // Matrix multiply 16816|1688.TF32 A + // Q0 Q2 + // Q1 Q3 + partition_contiguous_idx = (lane_id % Layout::kFactor); + access_contiguous_idx = + (quad_quad ^ (lane_in_quad_pair / Layout::kFactor)); + access_strided_idx = (lane_in_quad_quad / Layout::kFactor); + } else if (Policy::LdsmShape::kStrided == + (Policy::LdsmShape::kCount / 2) && + kOperand == Operand::kB) { + // Matrix multiply 16816|1688.TF32 B + // Q0 Q1 + // Q2 Q3 + partition_contiguous_idx = (lane_id % Layout::kFactor); + access_contiguous_idx = + ((quad_pair & 1) ^ (lane_in_quad_pair / Layout::kFactor)); + access_strided_idx = + (lane_in_quad_pair + (lane_id >> 4 << 3)) / Layout::kFactor; + } + else if (Policy::LdsmShape::kContiguous == Policy::LdsmShape::kCount) { + // Matrix multiply 16832.SP B + // Q0 Q1 Q2 Q3 + partition_contiguous_idx = (lane_id % Layout::kFactor); + access_contiguous_idx = + (quad_pair ^ (lane_in_quad_pair / Layout::kFactor)); + access_strided_idx = lane_in_quad_pair / Layout::kFactor; + } + } else if (Layout::kFactor == 1) { + // Super Matrix multiply kBlock = 64 + if (Policy::LdsmShape::kStrided == Policy::LdsmShape::kCount) { + // Q0 + // Q1 + // Q2 + // Q3 + partition_contiguous_idx = (lane_in_quad_pair >> 2); + access_contiguous_idx = lane_in_quad; + access_strided_idx = lane_id; + } + else if (Policy::LdsmShape::kStrided == + (Policy::LdsmShape::kCount / 2) && + kOperand == Operand::kA) { + // Matrix multiply 16816|1688.TF32 A + // Q0 Q2 + // Q1 Q3 + partition_contiguous_idx = (lane_in_quad_pair >> 2); + access_contiguous_idx = (quad_quad ^ lane_in_quad); + access_strided_idx = lane_in_quad_quad; + } else if (Policy::LdsmShape::kStrided == + (Policy::LdsmShape::kCount / 2) && + kOperand == Operand::kB) { + // Matrix multiply 16816|1688.TF32 B + // Q0 Q1 + // Q2 Q3 + partition_contiguous_idx = (lane_in_quad_pair >> 2); + access_contiguous_idx = ((quad_pair & 1) ^ lane_in_quad); + access_strided_idx = lane_in_quad_pair + (lane_id >> 4 << 3); + } + else if (Policy::LdsmShape::kContiguous == Policy::LdsmShape::kCount) { + // Matrix multiply 16832.SP B + // Q0 Q1 Q2 Q3 + partition_contiguous_idx = (lane_in_quad_pair >> 2); + access_contiguous_idx = (quad_pair ^ lane_in_quad); + access_strided_idx = lane_in_quad_pair; + } + } + + int access_contiguous = + partition_contiguous_idx * Layout::PartitionShape::kContiguous + + access_contiguous_idx; + + int access_strided = access_strided_idx; + + byte_offset_ = (access_contiguous + access_strided * stride_) * + sizeof_bits::value * Layout::kElementsPerAccess / 8; + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { + byte_offset_ += offset * sizeof_bits::value / 8; + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole + /// tiles + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator &add_tile_offset( + TensorCoord const &tile_offset) { + int whole_tiles = tile_offset.contiguous() / Policy::kGroupsPerTile; + int k_groups_delta = tile_offset.contiguous() % Policy::kGroupsPerTile; + + byte_offset_ ^= k_groups_delta * sizeof_bits::value * + Layout::kElementsPerAccess * + Policy::LdsmShape::kContiguous / 8; 
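+    // The XOR above moves within the current swizzled tile rather than moving
+    // the pointer: k_groups_delta selects which permuted LDSM column is read
+    // next. As a sketch (assuming 16-bit elements, so
+    // Layout::kElementsPerAccess == 8, and Policy::LdsmShape::kContiguous == 2,
+    // as for an m16n8k16 operand), one k-group step toggles byte_offset_ by
+    // 1 * 16 * 8 * 2 / 8 = 32 bytes, i.e. two 128-bit accesses. Whole-tile
+    // motion is handled by the pointer arithmetic below.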
+ pointer_ += + tile_offset.strided() * stride_ * Shape::kStrided / Layout::kFactor + + whole_tiles * stride_ / sections_; + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole + /// tiles + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator &add_tile_offset_negative( + TensorCoord const &tile_offset) { + + int whole_tiles = tile_offset.contiguous() / Policy::kGroupsPerTile; + int k_groups_delta = tile_offset.contiguous() % Policy::kGroupsPerTile; + if (k_groups_delta < 0) { + whole_tiles -= 1; + k_groups_delta += Policy::kGroupsPerTile; + } + + if ((Policy::kGroupsPerTile / kPartitionsK) >= 2) { + byte_offset_ ^= (k_groups_delta & 1) * Policy::LdsmShape::kContiguous * + sizeof_bits::value * + Layout::kElementsPerAccess / 8; + } + if ((Policy::kGroupsPerTile / kPartitionsK) >= 4) { + byte_offset_ ^= ((k_groups_delta + (k_group_idx_ & 1)) & 2) * + Policy::LdsmShape::kContiguous * + sizeof_bits::value * + Layout::kElementsPerAccess / 8; + } + if ((Policy::kGroupsPerTile / kPartitionsK) == 8) { + byte_offset_ ^= ((k_groups_delta + (k_group_idx_ & 3)) & 4) * + Policy::LdsmShape::kContiguous * + sizeof_bits::value * + Layout::kElementsPerAccess / 8; + } + + k_group_idx_ += k_groups_delta; + whole_tiles += k_group_idx_ / (Policy::kGroupsPerTile / kPartitionsK); + k_group_idx_ = k_group_idx_ % (Policy::kGroupsPerTile / kPartitionsK); + + pointer_ += + tile_offset.strided() * stride_ * Shape::kStrided / Layout::kFactor + + whole_tiles * stride_ / sections_; + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator &operator++() { + + // Integer matrix multiply 16832 Interleaved-32 + // NONE + // Integer matrix multiply 16816 Interleaved-32 || Integer matrix multiply 16816 kblock=32 + + // Integer matrix multiply 8816 Interleaved-32 + // ^1 ^1 + // Matrix multiply 1684.TF32 kblock=16 || Integer matrix multiply 16816 kblock=64 + // Matrix multiply 1688 kblock=32 || Integer matrix multiply 8816 kblock=64 + // ^1 ^3 ^1 ^3 + // Matrix multiply 1688 kblock=64 + // ^1 ^3 ^1 ^7 ^1 ^3 ^1 ^7 + + // Matrix multiply 16816 kblock=32 | 1688.TF32 kblock=16 || Integer matrix multiply 16832 kblock=64 + // ^2 ^2 + // Matrix multiply 16816 kblock=64 | 1688.TF32 kblock=32 || Integer matrix multiply 16832 kblock=128 + // ^2 ^6 ^2 ^6 + + if ((Policy::kGroupsPerTile / kPartitionsK) > 1) { + int mask = ((Policy::kGroupsPerTile / kPartitionsK) == 8) + ? 3 + : (((Policy::kGroupsPerTile / kPartitionsK) == 4) ? 
1 : 0); + + if (((k_group_idx_ & mask) % 2) == 0) + byte_offset_ ^= 1 * Policy::LdsmShape::kContiguous * + sizeof_bits::value * + Layout::kElementsPerAccess / 8; + else if ((k_group_idx_ & mask) == 1) + byte_offset_ ^= 3 * Policy::LdsmShape::kContiguous * + sizeof_bits::value * + Layout::kElementsPerAccess / 8; + else if ((k_group_idx_ & mask) == 3) + byte_offset_ ^= 7 * Policy::LdsmShape::kContiguous * + sizeof_bits::value * + Layout::kElementsPerAccess / 8; + } + + k_group_idx_++; + + if (k_group_idx_ == (Policy::kGroupsPerTile / kPartitionsK)) { + k_group_idx_ = 0; + add_tile_offset({Policy::kGroupsPerTile, 0}); + } + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &operator--() { assert(0); } + + ///< advances in units of whole tiles along the logical coordinate space of + ///< the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator &operator+=( + TensorCoord const &tile_offset) { + add_tile_offset(tile_offset); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of + ///< the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator &operator-=( + TensorCoord const &tile_offset) { + add_tile_offset(-tile_offset); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { load_with_byte_offset(frag, 0); } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset in units of bytes + Index byte_offset) const { + Array *fetch_ptr = + reinterpret_cast *>(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < Policy::LdsmIterations::kStrided; ++s) { + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < Policy::LdsmIterations::kContiguous; ++c) { + int access_idx = c + s * Policy::LdsmIterations::kContiguous; + + AccessType const *source_ptr = + pointer_ + Policy::LdsmShape::kContiguous * c + + Policy::kLdsmOpInner / Layout::kFactor * + Policy::LdsmShape::kStrided * s * stride_; + + char const *source_byte_ptr = + reinterpret_cast(source_ptr) + byte_offset + + byte_offset_; + + cutlass::arch::ldsm( + fetch_ptr[access_idx], source_byte_ptr); + } + } + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index pointer_offset) const { + load_with_byte_offset(frag, pointer_offset * sizeof(Element)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset) const { + load_with_byte_offset(frag, tile_offset, 0); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. 
+ CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index byte_offset) const { + Index pointer_offset = tile_offset.contiguous() * + InstructionShape::kContiguous / + Layout::kElementsPerAccess + + tile_offset.strided() * Shape::kStrided * stride_; + + byte_offset += sizeof_bits::value * pointer_offset / 8; + + load_with_byte_offset(frag, byte_offset); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. + CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + k_group_idx_ = k_group % (Policy::kGroupsPerTile / kPartitionsK); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// This tile iterator is specialized for 32-thread TensorOps. It uses LDSM to +/// load from shared memory and therefore must be initialized with a TensorRef +/// to shared memory. +/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Identifies A or B multiplicand + Operand Operand_, + /// Data type of elements + typename Element_, + /// Shape of one matrix product operation (concept: MatrixShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions) + int OpDelta_, + /// Element number when the layout crosses (in units of elements) + int Crosswise, + /// Number of partitions along K dimension + int PartitionsK_> +class MmaTensorOpMultiplicandTileIterator< + Shape_, Operand_, Element_, + cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + sizeof_bits::value, Crosswise>, + InstructionShape_, OpDelta_, 32, PartitionsK_> { + public: + /// Shape of tile to load (concept: PitchLinearShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand_; + + static_assert(kOperand == Operand::kB, + "MmaTensorOpMultiplicandIterator for ColumnMajor Crosswise may " + "only be instantiated for B operand to warp-level Mma."); + + /// Element type + using Element = Element_; + + /// KBlock size + static int const kCrosswise = Crosswise; + + /// Layout of source tile + using Layout = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< + sizeof_bits::value, kCrosswise>; + + /// Shape of one matrix product operation (concept: MatrixShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: + /// MatrixShape) + static int const kOpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Underlying tile iterator implementation + using Base = MmaTensorOpMultiplicandTileIterator< + layout::PitchLinearShape, kOperand, Element, + 
layout::TensorOpMultiplicandCrosswise::value, + kCrosswise>, + layout::PitchLinearShape, + kOpDelta, kThreads, PartitionsK_>; + + public: + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = typename Base::Fragment; + + private: + /// Underlying tile iterator + Base iterator_; + + public: + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator() {} + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator(TensorRef const &ref, int lane_id) + : iterator_({ref.data(), ref.stride()}, lane_id) {} + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { + iterator_.add_pointer_offset(offset); + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole + /// tiles + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &add_tile_offset( + TensorCoord const &tile_offset) { + iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()}); + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole + /// tiles + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator &add_tile_offset_negative( + TensorCoord const &tile_offset) { + iterator_.add_tile_offset_negative({tile_offset.row(), tile_offset.column()}); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &operator++() { + ++iterator_; + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &operator--() { + --iterator_; + + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of + ///< the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator &operator+=( + TensorCoord const &tile_offset) { + add_tile_offset(PitchLinearCoord(tile_offset.row(), tile_offset.column())); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of + ///< the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator &operator-=( + TensorCoord const &tile_offset) { + add_tile_offset(-PitchLinearCoord(tile_offset.row(), tile_offset.column())); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { iterator_.load(frag); } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index pointer_offset) const { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index byte_offset) const { + iterator_.load_with_byte_offset(frag, byte_offset); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. 
+ CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset) const { + assert(0); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + assert(0); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index byte_offset) const { + iterator_.load_with_byte_offset( + frag, {tile_offset.contiguous(), tile_offset.strided()}, byte_offset); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. + CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + iterator_.set_kgroup_index(k_group); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// This tile iterator is specialized for 32-thread TensorOps. It uses LDSM to +/// load from shared memory and therefore must be initialized with a TensorRef +/// to shared memory. 
+/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Identifies A or B multiplicand + Operand Operand_, + /// Data type of elements + typename Element_, + /// Shape of one matrix product operation (concept: MatrixShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions) + int OpDelta_, + /// Element number when the layout crosses (in units of elements) + int Crosswise, + /// Number of partitions along K dimension + int PartitionsK_> +class MmaTensorOpMultiplicandTileIterator< + Shape_, Operand_, Element_, + cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + sizeof_bits::value, Crosswise>, + InstructionShape_, OpDelta_, 32, PartitionsK_> { + public: + /// Shape of tile to load (concept: PitchLinearShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand_; + + static_assert(kOperand == Operand::kA, + "MmaTensorOpMultiplicandIterator for RowMajor Crosswise may " + "only be instantiated for A operand to warp-level Mma."); + + /// Element type + using Element = Element_; + + /// Element number when the layout crosses + static int const kCrosswise = Crosswise; + + /// Layout of source tile + using Layout = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< + sizeof_bits::value, kCrosswise>; + + /// Shape of one matrix product operation (concept: MatrixShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: + /// MatrixShape) + static int const kOpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Underlying tile iterator implementation + using Base = MmaTensorOpMultiplicandTileIterator< + layout::PitchLinearShape, kOperand, Element, + layout::TensorOpMultiplicandCrosswise::value, + kCrosswise>, + layout::PitchLinearShape, + kOpDelta, kThreads, PartitionsK_>; + + public: + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = typename Base::Fragment; + + private: + /// Underlying tile iterator + Base iterator_; + + public: + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator() {} + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator(TensorRef const &ref, int lane_id) + : iterator_({ref.data(), ref.stride()}, lane_id) {} + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { + iterator_.add_pointer_offset(offset); + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole + /// tiles + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &add_tile_offset( + TensorCoord const &tile_offset) { + iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()}); + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole + /// tiles + CUTLASS_DEVICE + 
MmaTensorOpMultiplicandTileIterator &add_tile_offset_negative( + TensorCoord const &tile_offset) { + iterator_.add_tile_offset_negative({tile_offset.column(), tile_offset.row()}); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &operator++() { + ++iterator_; + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &operator--() { + --iterator_; + + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of + ///< the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator &operator+=( + TensorCoord const &tile_offset) { + add_tile_offset(PitchLinearCoord(tile_offset.column(), tile_offset.row())); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of + ///< the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator &operator-=( + TensorCoord const &tile_offset) { + add_tile_offset(-PitchLinearCoord(tile_offset.column(), tile_offset.row())); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { iterator_.load(frag); } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index pointer_offset) const { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index byte_offset) const { + iterator_.load_with_byte_offset(frag, byte_offset); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset) const { + assert(0); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + assert(0); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index byte_offset) const { + iterator_.load_with_byte_offset( + frag, {tile_offset.strided(), tile_offset.contiguous()}, byte_offset); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. 
+ CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + iterator_.set_kgroup_index(k_group); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Element type + typename Element_, + /// Layout of operand in memory + typename Layout_, + /// Shape of one matrix product operation (concept: MatrixShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions, concept: MatrixShape) + typename OpDelta_> +class MmaTensorOpAccumulatorTileIterator; + +//////////////////////////////////////////////////////////////////////////////// + +/// This tile iterator is specialized for 32-thread TensorOps. It is used to load or store +/// accumulators from memory and is agnostic to layout. It could be faster if it assumed row-major +/// accumulator layout. +/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept | +/// WriteableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Element type + typename Element_, + /// Shape of one matrix product operation (concept: MatrixShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions, concept: MatrixShape) + typename OpDelta_> +class MmaTensorOpAccumulatorTileIterator< + Shape_, Element_, cutlass::layout::RowMajor, InstructionShape_, OpDelta_> { + public: + + /// Shape of tile to load (concept: MatrixShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand::kC; + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = cutlass::layout::RowMajor; + + /// Shape of one matrix product operation (concept: MatrixShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) + using OpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Internal structure of iterator - made public to enable introspection + struct Policy { + static bool const kDivisible = + !(Shape::kRow % InstructionShape::kM) && + !(Shape::kColumn % InstructionShape::kN); + + static_assert(platform::is_same::value, + "Layouts must be defined for logical MatrixCoord coordinate space."); + + /// Number of mma operations performed + using MmaIterations = MatrixShape< + (Shape::kRow + InstructionShape::kM - 1) / InstructionShape::kM, + (Shape::kColumn + InstructionShape::kN - 1) / InstructionShape::kN + >; + }; + +private: + + // Assume accumulator tile is an arrangement of 8-by-8 tiles replicated over the entire + // shape, with each quad mapped to one row and each thread mapped to 1/4 of the elements + // of that row. The accumulators within one row are assumed to be consecutive. 
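+  //
+  // A minimal host-side sketch of that mapping (illustrative only; the helper
+  // below is hypothetical and not part of CUTLASS), written out for a 16x8
+  // instruction so the constants beneath are concrete
+  // (kElementsPerAccess = 8 / 4 = 2, kRowsPerTile = 8, kAccumulatorRows = 2):
+  //
+  //   // coordinates owned by one thread inside a single 16x8 accumulator tile
+  //   void accumulator_coords(int lane_id, int rows[2], int cols[2]) {
+  //     int quad         = lane_id >> 2;   // 0..7 selects the row within a tile
+  //     int lane_in_quad = lane_id & 3;    // 0..3 selects the column pair
+  //     rows[0] = quad;                rows[1] = quad + 8;
+  //     cols[0] = 2 * lane_in_quad;    cols[1] = 2 * lane_in_quad + 1;
+  //   }
+  //
+  // Each thread therefore holds kAccumulatorRows * kElementsPerAccess = 4
+  // values per MMA, matching InstructionShape::kMN / kThreads = 128 / 32.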
+ static int const kElementsPerAccess = InstructionShape::kN / 4; + static int const kRowsPerTile = 8; + static int const kAccumulatorRows = InstructionShape::kM / kRowsPerTile; + +public: + + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = Array< + Element, + Policy::MmaIterations::kCount * InstructionShape::kMN / kThreads>; + +private: + + /// Reference to output tensor + TensorRef ref_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaTensorOpAccumulatorTileIterator() { } + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaTensorOpAccumulatorTileIterator( + TensorRef const &ref, + int lane_id + ): + ref_(ref) { + + int quad = (lane_id >> 2); + int lane_in_quad = (lane_id & 3); + + MatrixCoord lane_offset(quad, lane_in_quad * kElementsPerAccess); + + ref_.add_coord_offset(lane_offset); + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_HOST_DEVICE + MmaTensorOpAccumulatorTileIterator &add_pointer_offset(LongIndex offset) { + ref_.add_pointer_offset(offset); + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaTensorOpAccumulatorTileIterator &add_tile_offset(TensorCoord const &tile_offset) { + + ref_.add_coord_offset(tile_offset * make_Coord(Shape::kRow, Shape::kColumn)); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpAccumulatorTileIterator & operator++() { + // deliberate no-op + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpAccumulatorTileIterator & operator--() { + // deliberate no-op + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpAccumulatorTileIterator & operator+=(TensorCoord const &tile_offset) { + add_tile_offset(tile_offset); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpAccumulatorTileIterator & operator-=(TensorCoord const &tile_offset) { + add_tile_offset(-tile_offset); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. 
+ CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + load_with_pointer_offset(frag, 0); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + Fragment &frag, ///< fragment to load from the tensor + Index pointer_offset) const { ///< loads a tile with a linear offset + + TensorRef offset_ref(ref_); + offset_ref.add_pointer_offset(pointer_offset); + + CUTLASS_PRAGMA_UNROLL + for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) { + CUTLASS_PRAGMA_UNROLL + for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) { + + int mma_accum_start = kAccumulatorRows * kElementsPerAccess * + (mma_n * Policy::MmaIterations::kRow + mma_m); + + CUTLASS_PRAGMA_UNROLL + for (int row = 0; row < kAccumulatorRows; ++row) { + CUTLASS_PRAGMA_UNROLL + for (int col = 0; col < kElementsPerAccess; ++col) { + int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow + + row * kRowsPerTile; + int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col; + + frag[mma_accum_start + row * kElementsPerAccess + col] = offset_ref.at({accum_m, accum_n}); + } + } + } + } + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + Fragment &frag, ///< fragment to load from the tensor + Index byte_offset) const { ///< loads a tile with a linear offset + + load_with_pointer_offset(byte_offset / sizeof(Element)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + Fragment &frag, ///< fragment to load from the tensor + TensorCoord const &tile_offset) const { ///< loads a tile with a logical offset in units of whole tiles + + load(frag, tile_offset, 0); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. 
+ CUTLASS_DEVICE + void load( + Fragment &frag, ///< fragment to load from the tensor + TensorCoord const &tile_offset, ///< loads a tile with a logical offset in units of whole tiles + Index pointer_offset) const { ///< loads a tile with a logical offset AND a pointer offset + + load_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset); + } + + /// Stores a fragment to memory + CUTLASS_HOST_DEVICE + void store(Fragment const &frag) const { + store_with_pointer_offset(frag, 0); + } + + /// Stores a fragment to memory with additional pointer offset + CUTLASS_DEVICE + void store_with_pointer_offset( + Fragment const &frag, ///< fragment to store from the tensor + Index pointer_offset) const { ///< store a tile with a linear offset + + TensorRef offset_ref(ref_); + offset_ref.add_pointer_offset(pointer_offset); + + CUTLASS_PRAGMA_UNROLL + for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) { + CUTLASS_PRAGMA_UNROLL + for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) { + + int mma_accum_start = kAccumulatorRows * kElementsPerAccess * + (mma_n * Policy::MmaIterations::kRow + mma_m); + + CUTLASS_PRAGMA_UNROLL + for (int row = 0; row < kAccumulatorRows; ++row) { + CUTLASS_PRAGMA_UNROLL + for (int col = 0; col < kElementsPerAccess; ++col) { + int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow + + row * kRowsPerTile; + int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col; + int idx = mma_accum_start + row * kElementsPerAccess + col; + + offset_ref.at({accum_m, accum_n}) = frag[idx]; + } + } + } + } + } + + /// Stores a fragment to memory with additional pointer offset + CUTLASS_DEVICE + void store_with_byte_offset( + Fragment const &frag, ///< fragment to store from the tensor + Index byte_offset) const { ///< store a tile with a linear offset + + store_with_pointer_offset(byte_offset / sizeof(Element)); + } + + /// Stores a fragment to memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void store( + Fragment &frag, ///< fragment to store to the tensor + TensorCoord const &tile_offset) const { ///< stores a tile with a logical offset in units of whole tiles + + store(frag, tile_offset, 0); + } + + /// Stores a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void store( + /// fragment to store to the tensor + Fragment const &frag, + /// stores a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// stores a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + store_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// This tile iterator is specialized for 32-thread TensorOps. It is used to load or store +/// accumulators from memory and is agnostic to layout. +/// +/// This iterator is not tested. 
+/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept | +/// WriteableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Element type + typename Element_, + /// Shape of one matrix product operation (concept: MatrixShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions, concept: MatrixShape) + typename OpDelta_> +class MmaTensorOpAccumulatorTileIterator< + Shape_, Element_, cutlass::layout::AffineRankN<2>, InstructionShape_, OpDelta_> { + public: + + /// Shape of tile to load (concept: MatrixShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand::kC; + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = cutlass::layout::RowMajor; + + /// Shape of one matrix product operation (concept: MatrixShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) + using OpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Internal structure of iterator - made public to enable introspection + struct Policy { + static bool const kDivisible = + !(Shape::kRow % InstructionShape::kM) && + !(Shape::kColumn % InstructionShape::kN); + + static_assert(platform::is_same::value, + "Layouts must be defined for logical MatrixCoord coordinate space."); + + /// Number of mma operations performed + using MmaIterations = MatrixShape< + (Shape::kRow + InstructionShape::kM - 1) / InstructionShape::kM, + (Shape::kColumn + InstructionShape::kN - 1) / InstructionShape::kN + >; + }; + +private: + + // Assume accumulator tile is an arrangement of 8-by-8 tiles replicated over the entire + // shape, with each quad mapped to one row and each thread mapped to 1/4 of the elements + // of that row. The accumulators within one row are assumed to be consecutive. 
+ static int const kElementsPerAccess = InstructionShape::kN / 4; + static int const kRowsPerTile = 8; + static int const kAccumulatorRows = InstructionShape::kM / kRowsPerTile; + +public: + + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = Array< + Element, + Policy::MmaIterations::kCount * InstructionShape::kMN / kThreads>; + +private: + + /// Reference to output tensor + TensorRef ref_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaTensorOpAccumulatorTileIterator() { } + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaTensorOpAccumulatorTileIterator( + TensorRef const &ref, + int lane_id + ): + ref_(ref) { + + int quad = (lane_id >> 2); + int lane_in_quad = (lane_id & 3); + + MatrixCoord lane_offset(quad, lane_in_quad * kElementsPerAccess); + + ref_.add_coord_offset(lane_offset); + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_HOST_DEVICE + MmaTensorOpAccumulatorTileIterator &add_pointer_offset(LongIndex offset) { + ref_.add_pointer_offset(offset); + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaTensorOpAccumulatorTileIterator &add_tile_offset(TensorCoord const &tile_offset) { + + ref_.add_coord_offset(tile_offset * make_Coord(Shape::kRow, Shape::kColumn)); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpAccumulatorTileIterator & operator++() { + // deliberate no-op + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpAccumulatorTileIterator & operator--() { + // deliberate no-op + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpAccumulatorTileIterator & operator+=(TensorCoord const &tile_offset) { + add_tile_offset(tile_offset); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpAccumulatorTileIterator & operator-=(TensorCoord const &tile_offset) { + add_tile_offset(-tile_offset); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. 
+ CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + load_with_pointer_offset(frag, 0); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + Fragment &frag, ///< fragment to load from the tensor + Index pointer_offset) const { ///< loads a tile with a linear offset + + TensorRef offset_ref(ref_); + offset_ref.add_pointer_offset(pointer_offset); + + CUTLASS_PRAGMA_UNROLL + for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) { + CUTLASS_PRAGMA_UNROLL + for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) { + + int mma_accum_start = kAccumulatorRows * kElementsPerAccess * + (mma_n * Policy::MmaIterations::kRow + mma_m); + + CUTLASS_PRAGMA_UNROLL + for (int row = 0; row < kAccumulatorRows; ++row) { + CUTLASS_PRAGMA_UNROLL + for (int col = 0; col < kElementsPerAccess; ++col) { + int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow + + row * kRowsPerTile; + int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col; + + frag[mma_accum_start + row * kElementsPerAccess + col] = offset_ref.at({accum_m, accum_n}); + } + } + } + } + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + Fragment &frag, ///< fragment to load from the tensor + Index byte_offset) const { ///< loads a tile with a linear offset + + load_with_pointer_offset(byte_offset / sizeof(Element)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + Fragment &frag, ///< fragment to load from the tensor + TensorCoord const &tile_offset) const { ///< loads a tile with a logical offset in units of whole tiles + + load(frag, tile_offset, 0); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. 
+ CUTLASS_DEVICE + void load( + Fragment &frag, ///< fragment to load from the tensor + TensorCoord const &tile_offset, ///< loads a tile with a logical offset in units of whole tiles + Index pointer_offset) const { ///< loads a tile with a logical offset AND a pointer offset + + load_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset); + } + + /// Stores a fragment to memory + CUTLASS_HOST_DEVICE + void store(Fragment const &frag) const { + store_with_pointer_offset(frag, 0); + } + + /// Stores a fragment to memory with additional pointer offset + CUTLASS_DEVICE + void store_with_pointer_offset( + Fragment const &frag, ///< fragment to store from the tensor + Index pointer_offset) const { ///< store a tile with a linear offset + + TensorRef offset_ref(ref_); + offset_ref.add_pointer_offset(pointer_offset); + + CUTLASS_PRAGMA_UNROLL + for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) { + CUTLASS_PRAGMA_UNROLL + for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) { + + int mma_accum_start = kAccumulatorRows * kElementsPerAccess * + (mma_n * Policy::MmaIterations::kRow + mma_m); + + CUTLASS_PRAGMA_UNROLL + for (int row = 0; row < kAccumulatorRows; ++row) { + CUTLASS_PRAGMA_UNROLL + for (int col = 0; col < kElementsPerAccess; ++col) { + int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow + + row * kRowsPerTile; + int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col; + int idx = mma_accum_start + row * kElementsPerAccess + col; + + offset_ref.at({accum_m, accum_n}) = frag[idx]; + } + } + } + } + } + + /// Stores a fragment to memory with additional pointer offset + CUTLASS_DEVICE + void store_with_byte_offset( + Fragment const &frag, ///< fragment to store from the tensor + Index byte_offset) const { ///< store a tile with a linear offset + + store_with_pointer_offset(byte_offset / sizeof(Element)); + } + + /// Stores a fragment to memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void store( + Fragment &frag, ///< fragment to store to the tensor + TensorCoord const &tile_offset) const { ///< stores a tile with a logical offset in units of whole tiles + + store(frag, tile_offset, 0); + } + + /// Stores a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void store( + /// fragment to store to the tensor + Fragment const &frag, + /// stores a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// stores a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + store_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// This tile iterator is specialized for 32-thread TensorOps. It is used to load or store +/// accumulators from memory and is agnostic to layout. It could be faster if it assumed row-major +/// accumulator layout. 
+/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept | +/// WriteableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Element type + typename Element_, + /// Shape of one matrix product operation (concept: MatrixShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions, concept: MatrixShape) + typename OpDelta_> +class MmaTensorOpAccumulatorTileIterator { + public: + + /// Shape of tile to load (concept: MatrixShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand::kC; + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = cutlass::layout::ColumnMajor; + + /// Shape of one matrix product operation (concept: MatrixShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) + using OpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Internal structure of iterator - made public to enable introspection + struct Policy { + static bool const kDivisible = + !(Shape::kRow % InstructionShape::kM) && + !(Shape::kColumn % InstructionShape::kN); + + static_assert(platform::is_same::value, + "Layouts must be defined for logical MatrixCoord coordinate space."); + + /// Number of mma operations performed + using MmaIterations = MatrixShape< + (Shape::kRow + InstructionShape::kM - 1) / InstructionShape::kM, + (Shape::kColumn + InstructionShape::kN - 1) / InstructionShape::kN + >; + }; + +private: + + // Assume accumulator tile is an arrangement of 8-by-8 tiles replicated over the entire + // shape, with each quad mapped to one row and each thread mapped to 1/4 of the elements + // of that row. The accumulators within one row are assumed to be consecutive. 
+  static int const kElementsPerAccess = InstructionShape::kN / 4;
+  static int const kRowsPerTile = 8;
+  static int const kAccumulatorRows = InstructionShape::kM / kRowsPerTile;
+
+public:
+
+  //
+  // Derived quantities
+  //
+
+  /// Fragment object holding a thread's part of a tile
+  using Fragment = Array<
+    Element,
+    Policy::MmaIterations::kCount * InstructionShape::kMN / kThreads>;
+
+private:
+
+  /// Reference to output tensor
+  TensorRef ref_;
+
+public:
+
+  /// Default ctor constructs null iterator
+  CUTLASS_HOST_DEVICE
+  MmaTensorOpAccumulatorTileIterator() { }
+
+  /// Constructor from TensorRef
+  CUTLASS_HOST_DEVICE
+  MmaTensorOpAccumulatorTileIterator(
+    TensorRef const &ref,
+    int lane_id
+  ):
+    ref_(ref) {
+
+    int quad = (lane_id >> 2);
+    int lane_in_quad = (lane_id & 3);
+
+    MatrixCoord lane_offset(quad, lane_in_quad * kElementsPerAccess);
+
+    ref_.add_coord_offset(lane_offset);
+  }
+
+  /// Adds a pointer offset to internal pointer(s) to advance through memory
+  CUTLASS_HOST_DEVICE
+  MmaTensorOpAccumulatorTileIterator &add_pointer_offset(LongIndex offset) {
+    ref_.add_pointer_offset(offset);
+    return *this;
+  }
+
+  /// Advances an iterator along logical dimensions of matrix in units of whole tiles
+  CUTLASS_HOST_DEVICE
+  MmaTensorOpAccumulatorTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
+
+    ref_.add_coord_offset(tile_offset * make_Coord(Shape::kRow, Shape::kColumn));
+
+    return *this;
+  }
+
+  /// Advances the iterator along the advance dimension
+  CUTLASS_HOST_DEVICE
+  MmaTensorOpAccumulatorTileIterator & operator++() {
+    // deliberate no-op
+    return *this;
+  }
+
+  /// Advances the iterator along the advance dimension
+  CUTLASS_HOST_DEVICE
+  MmaTensorOpAccumulatorTileIterator & operator--() {
+    // deliberate no-op
+    return *this;
+  }
+
+  ///< advances in units of whole tiles along the logical coordinate space of the tensor
+  CUTLASS_DEVICE
+  MmaTensorOpAccumulatorTileIterator & operator+=(TensorCoord const &tile_offset) {
+    add_tile_offset(tile_offset);
+    return *this;
+  }
+
+  ///< advances in units of whole tiles along the logical coordinate space of the tensor
+  CUTLASS_DEVICE
+  MmaTensorOpAccumulatorTileIterator & operator-=(TensorCoord const &tile_offset) {
+    add_tile_offset(-tile_offset);
+    return *this;
+  }
+
+  /// Loads a fragment from memory at the location pointed to by the iterator.
+ CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + load_with_pointer_offset(frag, 0); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + Fragment &frag, ///< fragment to load from the tensor + Index pointer_offset) const { ///< loads a tile with a linear offset + + TensorRef offset_ref(ref_); + offset_ref.add_pointer_offset(pointer_offset); + + CUTLASS_PRAGMA_UNROLL + for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) { + CUTLASS_PRAGMA_UNROLL + for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) { + + int mma_accum_start = kAccumulatorRows * kElementsPerAccess * + (mma_n * Policy::MmaIterations::kRow + mma_m); + + CUTLASS_PRAGMA_UNROLL + for (int row = 0; row < kAccumulatorRows; ++row) { + CUTLASS_PRAGMA_UNROLL + for (int col = 0; col < kElementsPerAccess; ++col) { + int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow + + row * kRowsPerTile; + int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col; + int idx = mma_accum_start + row * kElementsPerAccess + col; + + frag[idx] = offset_ref.at({accum_m, accum_n}); + } + } + } + } + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + Fragment &frag, ///< fragment to load from the tensor + Index byte_offset) const { ///< loads a tile with a linear offset + + load_with_pointer_offset(byte_offset / sizeof(Element)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + Fragment &frag, ///< fragment to load from the tensor + TensorCoord const &tile_offset) const { ///< loads a tile with a logical offset in units of whole tiles + + load(frag, tile_offset, 0); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. 
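To make the fragment layout concrete, the following host-side enumeration reproduces the index arithmetic of load_with_pointer_offset above (store_with_pointer_offset later in the class applies the identical mapping in reverse). The 32x16 warp tile, 16x8 instruction shape, and unit OpDelta are illustrative assumptions only; coordinates are relative to the lane's base offset, and the 16 printed entries match Shape::kCount / kThreads for these sizes.

// Enumerates frag[idx] <-> (accum_m, accum_n) for one thread, using the same
// loop structure as load_with_pointer_offset.
// Assumed sizes: Shape 32x16, InstructionShape 16x8, OpDelta 1x1.
#include <cstdio>

int main() {
  int const kM = 16, kN = 8;                       // InstructionShape (assumed)
  int const kRowsPerTile = 8;
  int const kElementsPerAccess = kN / 4;           // 2
  int const kAccumulatorRows = kM / kRowsPerTile;  // 2
  int const kMmaRow = 32 / kM, kMmaCol = 16 / kN;  // MmaIterations = 2 x 2

  for (int mma_n = 0; mma_n < kMmaCol; ++mma_n) {
    for (int mma_m = 0; mma_m < kMmaRow; ++mma_m) {
      int mma_accum_start =
          kAccumulatorRows * kElementsPerAccess * (mma_n * kMmaRow + mma_m);
      for (int row = 0; row < kAccumulatorRows; ++row) {
        for (int col = 0; col < kElementsPerAccess; ++col) {
          int accum_m = mma_m * kM + row * kRowsPerTile;  // OpDelta::kRow == 1
          int accum_n = mma_n * kN + col;                 // OpDelta::kColumn == 1
          int idx = mma_accum_start + row * kElementsPerAccess + col;
          std::printf("frag[%2d] <-> (%2d, %2d)\n", idx, accum_m, accum_n);
        }
      }
    }
  }
  return 0;
}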
+ CUTLASS_DEVICE + void load( + Fragment &frag, ///< fragment to load from the tensor + TensorCoord const &tile_offset, ///< loads a tile with a logical offset in units of whole tiles + Index pointer_offset) const { ///< loads a tile with a logical offset AND a pointer offset + + load_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset); + } + + /// Stores a fragment to memory + CUTLASS_HOST_DEVICE + void store(Fragment const &frag) const { + store_with_pointer_offset(frag, 0); + } + + /// Stores a fragment to memory with additional pointer offset + CUTLASS_DEVICE + void store_with_pointer_offset( + Fragment const &frag, ///< fragment to store from the tensor + Index pointer_offset) const { ///< store a tile with a linear offset + + TensorRef offset_ref(ref_); + offset_ref.add_pointer_offset(pointer_offset); + + CUTLASS_PRAGMA_UNROLL + for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) { + CUTLASS_PRAGMA_UNROLL + for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) { + + int mma_accum_start = kAccumulatorRows * kElementsPerAccess * + (mma_n * Policy::MmaIterations::kRow + mma_m); + + CUTLASS_PRAGMA_UNROLL + for (int row = 0; row < kAccumulatorRows; ++row) { + CUTLASS_PRAGMA_UNROLL + for (int col = 0; col < kElementsPerAccess; ++col) { + int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow + + row * kRowsPerTile; + int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col; + int idx = mma_accum_start + row * kElementsPerAccess + col; + + offset_ref.at({accum_m, accum_n}) = frag[idx]; + } + } + } + } + } + + /// Stores a fragment to memory with additional pointer offset + CUTLASS_DEVICE + void store_with_byte_offset( + Fragment const &frag, ///< fragment to store from the tensor + Index byte_offset) const { ///< store a tile with a linear offset + + store_with_pointer_offset(byte_offset / sizeof(Element)); + } + + /// Stores a fragment to memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void store( + Fragment &frag, ///< fragment to store to the tensor + TensorCoord const &tile_offset) const { ///< stores a tile with a logical offset in units of whole tiles + + store(frag, tile_offset, 0); + } + + /// Stores a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void store( + /// fragment to store to the tensor + Fragment const &frag, + /// stores a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// stores a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + store_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// This tile iterator is specialized for 32-thread TensorOps. It is used to load or store +/// accumulators from memory and is agnostic to layout. It could be faster if it assumed row-major +/// accumulator layout. 
+/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept | +/// WriteableRandomAccessContiguousTileIteratorConcept +/// + +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Element typ + typename Element_, + /// Shape of one matrix product operation (concept: MatrixShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions, concept: MatrixShape) + typename OpDelta_, + /// Interleaved N + int InterleavedN> +class MmaTensorOpAccumulatorTileIterator< + Shape_, Element_, cutlass::layout::ColumnMajorInterleaved, + InstructionShape_, OpDelta_> { + public: + + /// Shape of tile to load (concept: MatrixShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand::kC; + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = cutlass::layout::ColumnMajorInterleaved; + + /// Shape of one matrix product operation (concept: MatrixShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) + using OpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Internal structure of iterator - made public to enable introspection + struct Policy { + static_assert( + !(Shape::kRow % InstructionShape::kM) && + !(Shape::kColumn % InstructionShape::kN), + "Shape of warp-level Mma must be divisible by operator shape."); + + static_assert(platform::is_same::value, + "Layouts must be defined for logical MatrixCoord coordinate space."); + + /// Number of mma operations performed + using MmaIterations = MatrixShape; + }; + +private: + + static int const kElementsPerAccess = 2; + +public: + + // + // Derived quantities + // + + using AccessType = Array; + + /// Fragment object holding a thread's part of a tile + using Fragment = Array; + +private: + + /// Reference to output tensor + TensorRef ref_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaTensorOpAccumulatorTileIterator() { } + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaTensorOpAccumulatorTileIterator( + TensorRef const &ref, + int lane_id + ): + ref_(ref) { + + int quad = (lane_id >> 2); + int lane_in_quad = (lane_id & 3); + + MatrixCoord lane_offset(quad, lane_in_quad * kElementsPerAccess); + + ref_.add_coord_offset(lane_offset); + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_HOST_DEVICE + MmaTensorOpAccumulatorTileIterator &add_pointer_offset(LongIndex offset) { + ref_.add_pointer_offset(offset); + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaTensorOpAccumulatorTileIterator &add_tile_offset(TensorCoord const &tile_offset) { + + ref_.add_coord_offset(tile_offset * make_Coord(Shape::kRow, Shape::kColumn)); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpAccumulatorTileIterator & operator++() { + // deliberate no-op + return *this; + } + + /// Advances the iterator along the 
advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpAccumulatorTileIterator & operator--() { + // deliberate no-op + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpAccumulatorTileIterator & operator+=(TensorCoord const &tile_offset) { + add_tile_offset(tile_offset); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpAccumulatorTileIterator & operator-=(TensorCoord const &tile_offset) { + add_tile_offset(-tile_offset); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + load_with_pointer_offset(frag, 0); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + Fragment &frag, ///< fragment to load from the tensor + Index pointer_offset) const { ///< loads a tile with a linear offset + + TensorRef offset_ref(ref_); + offset_ref.add_pointer_offset(pointer_offset); + + AccessType* frag_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) { + CUTLASS_PRAGMA_UNROLL + for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) { + int accum_m = mma_m * InstructionShape::kM; + int accum_n = mma_n * InstructionShape::kN; + + int idx = mma_m + mma_n * Policy::MmaIterations::kRow; + + AccessType* access_ptr = reinterpret_cast(offset_ref.data() + + offset_ref.offset(TensorCoord(accum_m, accum_n))); + + frag_ptr[idx] = access_ptr[0]; + } + } + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + Fragment &frag, ///< fragment to load from the tensor + Index byte_offset) const { ///< loads a tile with a linear offset + + load_with_pointer_offset(byte_offset / sizeof(Element)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + Fragment &frag, ///< fragment to load from the tensor + TensorCoord const &tile_offset) const { ///< loads a tile with a logical offset in units of whole tiles + + load(frag, tile_offset, 0); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. 
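The interleaved specialization moves kElementsPerAccess = 2 elements per access and walks its accesses in mma_m-major order. A small sketch of that bookkeeping, matching the loop in load_with_pointer_offset above; the 2x2 grid of MMA operations and the 16x16 instruction shape are assumed for illustration.

// Shows the order in which the interleaved accumulator iterator visits its
// per-MMA accesses: idx = mma_m + mma_n * MmaIterations::kRow, one AccessType
// (two elements) per MMA tile.  MmaIterations 2x2 and a 16x16 instruction
// shape are assumed purely for illustration.
#include <cstdio>

int main() {
  int const kMmaRow = 2, kMmaCol = 2;    // MmaIterations (assumed)
  int const kInstrM = 16, kInstrN = 16;  // InstructionShape (assumed)

  for (int mma_n = 0; mma_n < kMmaCol; ++mma_n) {
    for (int mma_m = 0; mma_m < kMmaRow; ++mma_m) {
      int accum_m = mma_m * kInstrM;
      int accum_n = mma_n * kInstrN;
      int idx = mma_m + mma_n * kMmaRow;
      std::printf("access %d starts at (%2d, %2d)\n", idx, accum_m, accum_n);
    }
  }
  return 0;
}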
+ CUTLASS_DEVICE + void load( + Fragment &frag, ///< fragment to load from the tensor + TensorCoord const &tile_offset, ///< loads a tile with a logical offset in units of whole tiles + Index pointer_offset) const { ///< loads a tile with a logical offset AND a pointer offset + + load_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset); + } + + /// Stores a fragment to memory + CUTLASS_HOST_DEVICE + void store(Fragment const &frag) const { + store_with_pointer_offset(frag, 0); + } + + /// Stores a fragment to memory with additional pointer offset + CUTLASS_DEVICE + void store_with_pointer_offset( + Fragment const &frag, ///< fragment to store from the tensor + Index pointer_offset) const { ///< store a tile with a linear offset + + TensorRef offset_ref(ref_); + offset_ref.add_pointer_offset(pointer_offset); + + AccessType const *frag_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) { + CUTLASS_PRAGMA_UNROLL + for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) { + int accum_m = mma_m * InstructionShape::kM; + int accum_n = mma_n * InstructionShape::kN; + + int idx = mma_m + mma_n * Policy::MmaIterations::kRow; + + AccessType* access_ptr = reinterpret_cast(offset_ref.data() + + offset_ref.offset(TensorCoord(accum_m, accum_n))); + + access_ptr[0] = frag_ptr[idx]; + } + } + } + + /// Stores a fragment to memory with additional pointer offset + CUTLASS_DEVICE + void store_with_byte_offset( + Fragment const &frag, ///< fragment to store from the tensor + Index byte_offset) const { ///< store a tile with a linear offset + + store_with_pointer_offset(byte_offset / sizeof(Element)); + } + + /// Stores a fragment to memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void store( + Fragment &frag, ///< fragment to store to the tensor + TensorCoord const &tile_offset) const { ///< stores a tile with a logical offset in units of whole tiles + + store(frag, tile_offset, 0); + } + + /// Stores a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void store( + /// fragment to store to the tensor + Fragment const &frag, + /// stores a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// stores a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + store_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// This tile iterator is specialized for 32-thread TensorOps. It is used to load or store +/// accumulators from memory and is agnostic to layout. It could be faster if it assumed row-major +/// accumulator layout. 
+/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept | +/// WriteableRandomAccessContiguousTileIteratorConcept +/// + +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Element typ + typename Element_, + /// Shape of one matrix product operation (concept: MatrixShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions, concept: MatrixShape) + typename OpDelta_, + /// Interleaved N + int InterleavedN> +class MmaTensorOpAccumulatorTileIterator< + Shape_, Element_, cutlass::layout::TensorNCxHWx, + InstructionShape_, OpDelta_> { + public: + + /// Shape of tile to load (concept: MatrixShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand::kC; + + /// Element type + using Element = int8_t; + + /// Layout of source tile + using Layout = cutlass::layout::TensorNCxHWx; + + /// Shape of one matrix product operation (concept: MatrixShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) + using OpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Long Index type + using StrideIndex = typename TensorRef::Layout::Stride::Index; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Internal structure of iterator - made public to enable introspection + struct Policy { + static_assert( + !(Shape::kRow % InstructionShape::kM) && + !(Shape::kColumn % InstructionShape::kN), + "Shape of warp-level Mma must be divisible by operator shape."); + + /// Number of elements in strided dimension that each STG writes + static int const kStridedPerSTG = 8; + + /// Factor to calculate reorder index to pack accumulator. 
+ static int const kPackedFactor = Shape::kColumn / 32; + + /// Number of mma operations performed + using MmaIterations = MatrixShape; + }; + +private: + + static int const kElementsPerAccess = InterleavedN / 4; + +public: + + // + // Derived quantities + // + + struct alignas((kElementsPerAccess * sizeof_bits::value / 8)) AccessType { + Array storage; + }; + + /// Fragment object holding a thread's part of a tile + using Fragment = Array; + +private: + + /// Reference to output tensor + TensorRef ref_; + + /// Row offset index globally + LongIndex global_offset_row_; + + /// Column offset index globally + LongIndex global_offset_col_; + + /// Output tensor size + TensorCoord extent_; + + /// Alpha + float alpha_; + + /// Beta + float beta_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaTensorOpAccumulatorTileIterator() { } + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaTensorOpAccumulatorTileIterator( + TensorRef const &ref, + int const lane_id, + TensorCoord extent, + float alpha = 1.0f, + float beta = 0.0f + ): + ref_(ref), + extent_(extent), + alpha_(alpha), + beta_(beta) { + + int quad = (lane_id >> 2); + int lane_in_quad = (lane_id & 3); + + global_offset_row_ = quad; + + global_offset_col_ = lane_in_quad * kElementsPerAccess; + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_HOST_DEVICE + MmaTensorOpAccumulatorTileIterator &add_pointer_offset(LongIndex offset) { + ref_.add_pointer_offset(offset); + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaTensorOpAccumulatorTileIterator &add_tile_offset(MatrixCoord const &tile_offset) { + + global_offset_row_ += tile_offset.row() * Shape::kRow; + + global_offset_col_ += tile_offset.column() * Shape::kColumn; + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpAccumulatorTileIterator & operator++() { + // deliberate no-op + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpAccumulatorTileIterator & operator--() { + // deliberate no-op + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpAccumulatorTileIterator & operator+=(TensorCoord const &tile_offset) { + add_tile_offset(tile_offset); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpAccumulatorTileIterator & operator-=(TensorCoord const &tile_offset) { + add_tile_offset(-tile_offset); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. 
+ CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + load_with_pointer_offset(frag); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + Fragment &frag, ///< fragment to load from the tensor + Index pointer_offset) const { ///< loads a tile with a linear offset + + TensorRef offset_ref(ref_); + offset_ref.add_pointer_offset(pointer_offset); + + AccessType* frag_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int mma_n = 0; mma_n < Policy::MmaIterations::kN; ++mma_n) { + CUTLASS_PRAGMA_UNROLL + for (int mma_m = 0; mma_m < Policy::MmaIterations::kM; ++mma_m) { + int accum_m = mma_m * InstructionShape::kM; + int accum_n = mma_n * InstructionShape::kN; + + int idx = mma_m + mma_n * Policy::MmaIterations::kM; + + AccessType* access_ptr = reinterpret_cast(offset_ref.data() + + accum_m * offset_ref.stride(0) + accum_n); + + frag_ptr[idx] = access_ptr[0]; + } + } + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + Fragment &frag, ///< fragment to load from the tensor + Index byte_offset) const { ///< loads a tile with a linear offset + + load_with_pointer_offset(byte_offset / sizeof(Element)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + Fragment &frag, ///< fragment to load from the tensor + TensorCoord const &tile_offset) const { ///< loads a tile with a logical offset in units of whole tiles + + load(frag, tile_offset, 0); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + Fragment &frag, ///< fragment to load from the tensor + TensorCoord const &tile_offset, ///< loads a tile with a logical offset in units of whole tiles + Index pointer_offset) const { ///< loads a tile with a logical offset AND a pointer offset + + load_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset); + } + + /// Stores a fragment to memory + CUTLASS_HOST_DEVICE + void store(Fragment const &frag) const { + store_with_pointer_offset(frag, 0); + } + + /// Stores a fragment to memory with additional pointer offset + CUTLASS_DEVICE + void store_with_pointer_offset( + Fragment const &frag, ///< fragment to store from the tensor + Index pointer_offset) const { ///< store a tile with a linear offset + + TensorRef offset_ref(ref_); + offset_ref.add_pointer_offset(pointer_offset); + + Array output_frag_f; + Array output_frag; + + LongIndex pq = extent_.h() * extent_.w(); + + LongIndex extent_row = extent_.n() * pq; + LongIndex extent_col = extent_.c(); + + LongIndex k_major = (global_offset_col_ / InterleavedN) * pq; + Index k_minor = global_offset_col_ % InterleavedN; + LongIndex k_offset = k_major * InterleavedN + k_minor; + LongIndex k_offset_delta = pq * InterleavedN; + + LongIndex stride_n = pq * extent_.c(); + + Index n; + LongIndex pq_rem; + + unsigned int pq_mul, pq_shr; + find_divisor(pq_mul, pq_shr, pq); + + if(beta_ == 0.0f) { + CUTLASS_PRAGMA_UNROLL + for(int i = 0; i < frag.size(); ++i) { + output_frag_f[i] = frag[i]; + } + + if(InstructionShape::kM == Policy::kStridedPerSTG) { + CUTLASS_PRAGMA_UNROLL + for(int i = 0; i < frag.size(); ++i) { + output_frag[i] = (Element)(output_frag_f[i] * alpha_); + } + } else { + CUTLASS_PRAGMA_UNROLL + for(int i = 0; i < frag.size(); ++i) { + int map_i = (i / (16 * Policy::kPackedFactor)) * (16 * Policy::kPackedFactor) + + (i % (8 * Policy::kPackedFactor)) / 2 * 4 + + 
(i % (8 * Policy::kPackedFactor)) % 2 + + (i / (8 * Policy::kPackedFactor)) % 2 * 2; + output_frag[i] = (Element)(output_frag_f[map_i] * alpha_); + } + } + + AccessType const *frag_ptr = reinterpret_cast(&output_frag); + + CUTLASS_PRAGMA_UNROLL + for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) { + int accum_m = mma_m * Policy::kStridedPerSTG; + + fast_divmod(n, pq_rem, global_offset_row_ + accum_m, pq, pq_mul, pq_shr); + LongIndex offset_m = n * stride_n + k_offset + pq_rem * InterleavedN; + + CUTLASS_PRAGMA_UNROLL + for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) { + + int accum_n = mma_n * InterleavedN; + + int idx = mma_n + mma_m * Policy::MmaIterations::kColumn; + + if((global_offset_row_ + accum_m < extent_row) && (global_offset_col_ + accum_n < extent_col)) { + AccessType* access_ptr = reinterpret_cast(offset_ref.data() + + offset_m + mma_n * k_offset_delta); + + access_ptr[0] = frag_ptr[idx]; + } + } + } + } else { + if(InstructionShape::kM == Policy::kStridedPerSTG) { + CUTLASS_PRAGMA_UNROLL + for(int i = 0; i < frag.size(); ++i) { + output_frag_f[i] = frag[i]; + } + } else { + CUTLASS_PRAGMA_UNROLL + for(int i = 0; i < frag.size(); ++i) { + int map_i = (i / (16 * Policy::kPackedFactor)) * (16 * Policy::kPackedFactor) + + (i % (8 * Policy::kPackedFactor)) / 2 * 4 + + (i % (8 * Policy::kPackedFactor)) % 2 + + (i / (8 * Policy::kPackedFactor)) % 2 * 2; + output_frag_f[i] = frag[map_i]; + } + } + + AccessType const *frag_ptr = reinterpret_cast(&output_frag); + + Array ref_frag; + AccessType *ref_frag_ptr = reinterpret_cast(&ref_frag); + + CUTLASS_PRAGMA_UNROLL + for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) { + int accum_m = mma_m * Policy::kStridedPerSTG; + + fast_divmod(n, pq_rem, global_offset_row_ + accum_m, pq, pq_mul, pq_shr); + LongIndex offset_m = n * stride_n + k_offset + pq_rem * InterleavedN; + + CUTLASS_PRAGMA_UNROLL + for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) { + + int accum_n = mma_n * InterleavedN; + + int idx = mma_n + mma_m * Policy::MmaIterations::kColumn; + + if((global_offset_row_ + accum_m < extent_row) && (global_offset_col_ + accum_n < extent_col)) { + AccessType* access_ptr = reinterpret_cast(offset_ref.data() + + offset_m + mma_n * k_offset_delta); + + ref_frag_ptr[0] = access_ptr[0]; + + CUTLASS_PRAGMA_UNROLL + for(int i = 0; i < kElementsPerAccess; ++i) { + output_frag[idx * kElementsPerAccess + i] = Element(alpha_ * output_frag_f[idx * kElementsPerAccess + i] + + beta_ * ref_frag[i]); + } + + access_ptr[0] = frag_ptr[idx]; + } + } + } + } + } + + /// Stores a fragment to memory with additional pointer offset + CUTLASS_DEVICE + void store_with_byte_offset( + Fragment const &frag, ///< fragment to store from the tensor + Index byte_offset) const { ///< store a tile with a linear offset + + store_with_pointer_offset(byte_offset / sizeof(Element)); + } + + /// Stores a fragment to memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void store( + Fragment &frag, ///< fragment to store to the tensor + TensorCoord const &tile_offset) const { ///< stores a tile with a logical offset in units of whole tiles + + store(frag, tile_offset, 0); + } + + /// Stores a fragment from memory with logical offset in units of whole tiles. 
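The store path above linearizes a logical GEMM coordinate (row = output pixel, column = channel) into the interleaved NC{x}HWx tensor: the row is split into an image index and a pixel index via fast_divmod, and the column into an interleaved channel block plus a channel within that block. Below is a host-side sketch of that address computation, using plain division and modulo where the device code uses the precomputed pq_mul/pq_shr magic numbers; the extents in main are made-up values.

// Computes the NC{x}HWx linear offset of logical GEMM element (row, col),
// following the same decomposition as store_with_pointer_offset:
//   row -> (n, pq_rem) over P*Q pixels; col -> (col / InterleavedN, col % InterleavedN).
#include <cstdio>

long long ncxhwx_offset(long long row, long long col,
                        long long P, long long Q, long long C, int InterleavedN) {
  long long pq = P * Q;
  long long n = row / pq;                  // image index
  long long pq_rem = row % pq;             // pixel within the image
  long long stride_n = pq * C;             // elements per image
  long long k_major = col / InterleavedN;  // interleaved channel block
  long long k_minor = col % InterleavedN;  // channel within the block
  return n * stride_n + k_major * pq * InterleavedN + pq_rem * InterleavedN + k_minor;
}

int main() {
  // Assumed extents: H = W = 4 (so pq = 16), C = 64 channels, InterleavedN = 32.
  long long rows[] = {0, 5, 17};
  long long cols[] = {0, 3, 40};
  for (long long row : rows) {
    for (long long col : cols) {
      std::printf("(%2lld, %2lld) -> %4lld\n", row, col,
                  ncxhwx_offset(row, col, 4, 4, 64, 32));
    }
  }
  return 0;
}

Replacing the division and modulo by pq with a multiply-and-shift is what find_divisor and fast_divmod buy on the device, where integer division is comparatively expensive.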
+ CUTLASS_DEVICE + void store( + /// fragment to store to the tensor + Fragment const &frag, + /// stores a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// stores a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + store_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace warp +} // namespace gemm +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm70.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm70.h new file mode 100644 index 0000000000000000000000000000000000000000..b79b43e72808ae2dd21d72fca4909cce1121afd8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm70.h @@ -0,0 +1,3098 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Defines iterators used by warp-level matrix multiply operations targeting Tensor Cores. 
+*/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/array.h" +#include "cutlass/numeric_types.h" +#include "cutlass/tensor_ref.h" +#include "cutlass/matrix_shape.h" + +#include "cutlass/gemm/gemm.h" + +#include "cutlass/layout/matrix.h" +#include "cutlass/layout/pitch_linear.h" +#include "cutlass/layout/tensor_op_multiplicand_sm70.h" + +#include "cutlass/platform/platform.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace warp { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Operand identity + Operand Operand, + /// Data type of A elements + typename Element_, + /// Layout of operand + typename Layout_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Delta between *MMA operations (in units of *MMA operations, concept: + /// MatrixShape) + int OpDelta_, + /// Number of threads participating in one matrix operation + int Threads> +class MmaVoltaTensorOpMultiplicandTileIterator; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// This tile iterator is specialized for 32-thread TensorOps. +/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: PitchLinearShape) + typename Shape_, + /// Data type of elements + typename Element_, + /// Shape of one matrix product operation (concept: PitchLinearShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions) + int OpDelta_> +class MmaVoltaTensorOpMultiplicandTileIterator< + Shape_, Operand::kA, Element_, + cutlass::layout::VoltaTensorOpMultiplicandCongruous< + sizeof_bits::value>, + InstructionShape_, OpDelta_, 32> { + public: + + /// Shape of tile to load (concept: PitchLinearShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand::kA; + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = cutlass::layout::VoltaTensorOpMultiplicandCongruous::value>; + + /// Shape of one matrix product operation (concept: GemmShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) + static int const kOpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Long Index type + using StrideIndex = typename TensorRef::Layout::Stride::Index; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Internal structure of iterator - made public to enable introspection + struct Policy { + static_assert( + !(Shape::kContiguous % InstructionShape::kContiguous), + "Shape of warp-level Mma must be divisible by operator shape."); + + // Shape of one individual LDS.128 + // TODO: 32 and 4 are hardcoded, 32-by-4 is logical shape + using LdsShape = layout::PitchLinearShape< + 32, + 4 + >; + + // LdsShapes are arranged in the strided direction in SMEM + using LdsIterations = 
layout::PitchLinearShape< + InstructionShape::kStrided / LdsShape::kStrided, + Shape::kContiguous / LdsShape::kContiguous + >; + }; + +private: + + /// Not working on this feature at the moment. + static_assert(kOpDelta == 1, + "Alternative arrangements not supported at present."); + + /// Number of internal pointers needed to reference shared memory + static int const kPointerCount = 2; + + /// Pointer type used for accesses + using AccessType = AlignedArray; + +public: + + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = Array; + +private: + + /// Layout object storing stride values + StrideIndex stride_; + + /// Shared memory base pointers - not advanced + AccessType const *pointer_[kPointerCount]; + + /// Byte offset incremented as iterator advances + Index byte_offset_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0) { } + + /// Constructor from TensorRef + CUTLASS_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator( + TensorRef const &ref, + int lane_id + ): + stride_(ref.stride(0) / Layout::kElementsPerAccess), byte_offset_(0) { + // swizzle patterns for operandA LDS are + // 1. (tid[4] << 3) | (tid[2:0] ^ tid[4]) + // 2. (tid[4] << 3) | (tid[2:0] ^ tid[4] ^ 0b10010) + + int vec_row = (lane_id >> 4); // tid[4] + int vec_col = ((lane_id & 4) >> 2); // tid[2] + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kPointerCount; ++i) { + + if(i == 1) { + vec_row |= 2; + } + int access_contiguous_idx = (vec_col << 2) | ((lane_id & 3) ^ vec_row); + int access_contiguous = access_contiguous_idx; + + int access_strided = vec_row; + pointer_[i] = reinterpret_cast(ref.data()) + + access_contiguous + access_strided * stride_; + } + + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { + + byte_offset_ += offset * sizeof(Element); + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { + + int contiguous_offset = tile_offset.contiguous(); + int strided_offset = tile_offset.strided(); + + // To support 32x32 tile size + if (Shape::kContiguous == Policy::LdsShape::kContiguous) { + if (contiguous_offset % 2) { + AccessType const *tmp_pointer = pointer_[0]; + pointer_[0] = pointer_[1]; + pointer_[1] = tmp_pointer; + } + contiguous_offset = contiguous_offset / 2 * 2; + } + + int offset = (strided_offset * InstructionShape::kStrided) * stride_ * + Layout::kElementsPerAccess + + contiguous_offset * Shape::kContiguous; + + add_pointer_offset(offset); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator & operator++() { + byte_offset_ += stride_ * InstructionShape::kStrided * sizeof(Element) * + Layout::kElementsPerAccess; + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator & operator--() { + byte_offset_ -= stride_ * InstructionShape::kStrided * sizeof(Element) * + Layout::kElementsPerAccess; + + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator & 
operator+=(TensorCoord const &tile_offset) { + add_tile_offset(tile_offset); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { + add_tile_offset(-tile_offset); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + + load_with_byte_offset(frag, 0); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset in units of bytes + Index byte_offset) const { + + AccessType * fetch_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < Policy::LdsIterations::kStrided; ++s) { + + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < Policy::LdsIterations::kContiguous; ++c) { + + int access_idx = c + s * Policy::LdsIterations::kContiguous; + + AccessType const *source_ptr = pointer_[s & 1] + + Policy::LdsShape::kContiguous * c + + Policy::LdsShape::kStrided * (s / 2) * stride_; + + char const *source_byte_ptr = reinterpret_cast(source_ptr) + byte_offset + byte_offset_; + fetch_ptr[access_idx] = *(reinterpret_cast (source_byte_ptr)); + } + } + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index pointer_offset) const { + load_with_byte_offset(frag, pointer_offset * sizeof(Element)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset) const { + load_with_byte_offset(frag, tile_offset, 0); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index byte_offset) const { + Index pointer_offset = + tile_offset.contiguous() * Shape::kContiguous / + Layout::kElementsPerAccess + + tile_offset.strided() * InstructionShape::kStrided * stride_; + + byte_offset += sizeof(AccessType) * pointer_offset; + + load_with_byte_offset(frag, byte_offset); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. 
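The two shared-memory base pointers set up in the operand-A constructor above encode the swizzle described in its comment. The sketch below reproduces only that index arithmetic on the host and prints which (strided, contiguous) access each lane ends up with for each of the two pointers; it does not touch shared memory.

// Prints the swizzled (strided, contiguous) vector access each lane of a warp
// gets for the two operand-A base pointers (kPointerCount == 2), using the
// same bit manipulation as the Operand::kA constructor above.
#include <cstdio>

int main() {
  for (int lane_id = 0; lane_id < 32; ++lane_id) {
    int vec_row = (lane_id >> 4);        // tid[4]
    int vec_col = ((lane_id & 4) >> 2);  // tid[2]
    for (int i = 0; i < 2; ++i) {
      int row = vec_row;
      if (i == 1) {
        row |= 2;  // second pointer targets the other half of the tile
      }
      int access_contiguous = (vec_col << 2) | ((lane_id & 3) ^ row);
      int access_strided = row;
      std::printf("lane %2d ptr %d -> strided %d, contiguous %2d\n",
                  lane_id, i, access_strided, access_contiguous);
    }
  }
  return 0;
}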
+ CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + // no operation here + } +}; + +////////////////////////////////////////////////////////////////////////////////////////////////////////// + +/// This tile iterator is specialized for 32-thread TensorOps. +/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: PitchLinearShape) + typename Shape_, + /// Data type of elements + typename Element_, + /// Shape of one matrix product operation (concept: PitchLinearShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions) + int OpDelta_> + +class MmaVoltaTensorOpMultiplicandTileIterator< + Shape_, Operand::kB, Element_, + cutlass::layout::VoltaTensorOpMultiplicandBCongruous< + sizeof_bits::value>, + InstructionShape_, OpDelta_, 32> { + public: + + /// Shape of tile to load (concept: PitchLinearShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand::kB; + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = cutlass::layout::VoltaTensorOpMultiplicandBCongruous::value>; + + /// Shape of one matrix product operation (concept: GemmShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) + static int const kOpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Long Index type + using StrideIndex = typename TensorRef::Layout::Stride::Index; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Internal structure of iterator - made public to enable introspection + struct Policy { + static_assert( + !(Shape::kContiguous % InstructionShape::kContiguous), + "Shape of warp-level Mma must be divisible by operator shape."); + + // Shape of one individual LDS + // TODO: remove hardcoded 32 and 4 + using LdsShape = layout::PitchLinearShape< + 32, + 4 + >; + + using LdsIterations = layout::PitchLinearShape< + Shape::kContiguous / LdsShape::kContiguous, + InstructionShape::kStrided / LdsShape::kStrided + >; + }; + +private: + + /// Not working on this feature at the moment. 
+ static_assert(kOpDelta == 1, + "Alternative arrangements not supported at present."); + + /// Pointer type used for accesses + using AccessType = AlignedArray; + +public: + + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile, needs on more time number of registers + using Fragment = Array; + +private: + + /// Layout object storing stride values + StrideIndex stride_; + + /// Shared memory base pointers - not advanced + AccessType const *pointer_; + + /// Byte offset incremented as iterator advances + Index byte_offset_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0) { } + + /// Constructor from TensorRef + CUTLASS_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator( + TensorRef const &ref, + int lane_id + ): + stride_(ref.stride(0) / Layout::kElementsPerAccess), byte_offset_(0) { + + // swizzle pattern is (tid & (3 << 3) | (tid[1:0] ^ tid[4:3])) + int access_strided = (lane_id >> 3) & 0x3; + int access_contiguous = ((lane_id ^ (lane_id >> 3)) & 0x3); + + pointer_ = reinterpret_cast(ref.data()) + + access_contiguous + access_strided * stride_; + + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { + + byte_offset_ += offset * sizeof(Element); + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { + + int contiguous_offset = tile_offset.contiguous(); + int strided_offset = tile_offset.strided(); + + int offset = (strided_offset * InstructionShape::kStrided) * stride_ * + Layout::kElementsPerAccess + + contiguous_offset * Shape::kContiguous; + + add_pointer_offset(offset); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator & operator++() { + byte_offset_ += stride_ * InstructionShape::kStrided * sizeof(Element) * + Layout::kElementsPerAccess; + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator & operator--() { + byte_offset_ += stride_ * InstructionShape::kStrided * sizeof(Element) * + Layout::kElementsPerAccess; + + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { + add_tile_offset(tile_offset); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { + add_tile_offset(-tile_offset); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. 
+ CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + + load_with_byte_offset(frag, 0); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset in units of bytes + Index byte_offset) const { + + AccessType * fetch_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < Policy::LdsIterations::kStrided; ++s) { + + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < Policy::LdsIterations::kContiguous; ++c) { + + int access_idx = c + s * Policy::LdsIterations::kContiguous; + + AccessType const *source_ptr = pointer_ + + Policy::LdsShape::kContiguous / Layout::kElementsPerAccess * c + + Policy::LdsShape::kStrided * s * stride_; + + char const *source_byte_ptr = reinterpret_cast(source_ptr) + byte_offset + byte_offset_; + fetch_ptr[access_idx] = *(reinterpret_cast (source_byte_ptr)); + } + } + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index pointer_offset) const { + load_with_byte_offset(frag, pointer_offset * sizeof(Element)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset) const { + load_with_byte_offset(frag, tile_offset, 0); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index byte_offset) const { + Index pointer_offset = + tile_offset.contiguous() * Shape::kContiguous / + Layout::kElementsPerAccess + + tile_offset.strided() * InstructionShape::kStrided * stride_; + + byte_offset += sizeof(AccessType) * pointer_offset; + + load_with_byte_offset(frag, byte_offset); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. + CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + // no operation here + } +}; + +////////////////////////////////////////////////////////////////////////////////////////////////////////// + +/// This tile iterator is specialized for 32-thread TensorOps. It uses LDSM to load from shared +/// memory and therefore must be initialized with a TensorRef to shared memory. 
+/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Data type of elements + typename Element_, + /// Shape of one matrix product operation (concept: MatrixShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions) + int OpDelta_> +class MmaVoltaTensorOpMultiplicandTileIterator< + Shape_, Operand::kA, Element_, + cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous< + sizeof_bits::value>, + InstructionShape_, OpDelta_, 32> { + public: + + /// Shape of tile to load (concept: PitchLinearShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand::kA; + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous::value>; + + /// Shape of one matrix product operation (concept: MatrixShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) + static int const kOpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Underlying tile iterator implementation + using Base = MmaVoltaTensorOpMultiplicandTileIterator< + layout::PitchLinearShape, kOperand, Element, + layout::VoltaTensorOpMultiplicandCongruous::value>, + layout::PitchLinearShape, + kOpDelta, kThreads>; + + public: + + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = typename Base::Fragment; + +private: + + /// Underlying tile iterator + Base iterator_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator() { } + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator( + TensorRef const &ref, + int lane_id + ): iterator_({ref.data(), ref.stride()}, lane_id) { + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { + + iterator_.add_pointer_offset(offset); + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { + + iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()}); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator & operator++() { + + ++iterator_; + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator & operator--() { + + --iterator_; + + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { + add_tile_offset(PitchLinearCoord(tile_offset.row(), 
tile_offset.column())); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { + add_tile_offset(-PitchLinearCoord(tile_offset.row(), tile_offset.column())); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + + iterator_.load(frag); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index pointer_offset) const { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index byte_offset) const { + iterator_.load_with_byte_offset(frag, byte_offset); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset) const { + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index byte_offset) const { + iterator_.load_with_byte_offset( + frag, + {tile_offset.contiguous(), tile_offset.strided()}, + byte_offset); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. + CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + iterator_.set_kgroup_index(k_group); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// This tile iterator is specialized for 32-thread TensorOps. It uses LDSM to load from shared +/// memory and therefore must be initialized with a TensorRef to shared memory. 
+/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Data type of elements + typename Element_, + /// Shape of one matrix product operation (concept: MatrixShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions) + int OpDelta_> +class MmaVoltaTensorOpMultiplicandTileIterator< + Shape_, Operand::kB, Element_, + cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous< + sizeof_bits::value>, + InstructionShape_, OpDelta_, 32> { + public: + + /// Shape of tile to load (concept: PitchLinearShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand::kB; + + static_assert(kOperand == Operand::kA || kOperand== Operand::kB, + "MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma."); + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous::value>; + + /// Shape of one matrix product operation (concept: MatrixShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) + static int const kOpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Underlying tile iterator implementation + using Base = MmaVoltaTensorOpMultiplicandTileIterator< + layout::PitchLinearShape, kOperand, Element, + layout::VoltaTensorOpMultiplicandBCongruous::value>, + layout::PitchLinearShape, + kOpDelta, kThreads>; + + public: + + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = typename Base::Fragment; + +private: + + /// Underlying tile iterator + Base iterator_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator() { } + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator( + TensorRef const &ref, + int lane_id + ): iterator_({ref.data(), ref.stride()}, lane_id) { + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { + + iterator_.add_pointer_offset(offset); + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { + + iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()}); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator & operator++() { + + ++iterator_; + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator & operator--() { + + --iterator_; + + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the 
tensor + CUTLASS_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { + add_tile_offset(PitchLinearCoord(tile_offset.column(), tile_offset.row())); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { + add_tile_offset(-PitchLinearCoord(tile_offset.column(), tile_offset.row())); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + + iterator_.load(frag); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index pointer_offset) const { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index byte_offset) const { + iterator_.load_with_byte_offset(frag, byte_offset); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset) const { + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index byte_offset) const { + iterator_.load_with_byte_offset( + frag, + {tile_offset.strided(), tile_offset.contiguous()}, + byte_offset); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. + CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + iterator_.set_kgroup_index(k_group); + } +}; + +//////////////////////////////////////////////////////////////////////////////////////// + +/// This tile iterator is specialized for 32-thread TensorOps. It is used to load or store +/// accumulators from memory and is agnostic to layout. It could be faster if it assumed row-major +/// accumulator layout. 
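The accumulator iterator introduced below is driven differently from the multiplicand iterators above: it wraps a TensorRef in logical (row, column) coordinates, computes a per-lane origin in its constructor, and then loads or stores an entire accumulator fragment at the warp tile's current offset. A hedged sketch follows; `AccumIterator` and `store_warp_accumulators` are illustrative names, not part of this header.

```cpp
// Sketch only: `AccumIterator` stands in for a concrete
// MmaVoltaTensorOpAccumulatorTileIterator<...> instantiation.
template <typename AccumIterator>
CUTLASS_DEVICE void store_warp_accumulators(
    typename AccumIterator::TensorRef ref_C,               // output tensor (layout-agnostic)
    typename AccumIterator::Fragment const &accum,         // this lane's accumulator fragment
    typename AccumIterator::TensorCoord warp_tile_offset,  // warp tile position, in whole tiles
    int lane_id) {

  AccumIterator iter_C(ref_C, lane_id);      // per-lane origin is computed in the constructor
  iter_C.add_tile_offset(warp_tile_offset);  // advance in units of whole warp tiles
  iter_C.store(accum);                       // element-by-element scatter through the TensorRef
}
```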
+/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept | +/// WriteableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Data type of elements + typename Element_, + /// Layout of operand in memory + typename Layout_, + /// Shape of one matrix product operation (concept: MatrixShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions, concept: MatrixShape) + typename OpDelta_> +class MmaVoltaTensorOpAccumulatorTileIterator { + public: + + /// Shape of tile to load (concept: MatrixShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand::kC; + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = Layout_; + + /// Shape of one matrix product operation (concept: MatrixShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) + using OpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Internal structure of iterator - made public to enable introspection + struct Policy { + + /// Volta Tensor Op uses 32x32 interleaved tile + using InterleavedTile = MatrixShape<32, 32>; + + static_assert(!(Shape::kRow % InterleavedTile::kRow) && !(Shape::kColumn % InterleavedTile::kColumn), + "Shape of warp-level Mma must be divisible by operator shape."); + + static_assert(platform::is_same::value, + "Layouts must be defined for logical MatrixCoord coordinate space."); + + /// Number of mma operations performed + using TileIterations = MatrixShape< + Shape::kRow / InterleavedTile::kRow, + Shape::kColumn / InterleavedTile::kColumn + >; + + using MmaIterations = + MatrixShape; + }; + +private: + + // Assume accumulator tile is multipile interleaved 32x32 tile. 
+ static int const kElementsPerPartial = 4; + using EleShapePerPatial = typename platform::conditional< + platform::is_same::value, + MatrixShape<2, 2>, + MatrixShape<1, 4> >::type; + static int const kElementsPerMma = 8; + static int const kAccumulatorPatials = 2; + using QuadShapePerPatialMma = MatrixShape<4, 4>; + +public: + + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = Array; + +private: + + /// Reference to output tensor + TensorRef ref_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpAccumulatorTileIterator() { } + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpAccumulatorTileIterator( + TensorRef const &ref, + int lane_id + ): + ref_(ref) { + + int quad = (lane_id >> 2); + int lane_in_quad = (lane_id & 3); + int accum_m, accum_n; + + if (platform::is_same::value) { + // (quad[2],quad[0])+lane_in_quad[0] + accum_m = (((quad & 0x4) >> 1) + (quad & 0x1)) * 8 + (lane_in_quad & 1); + // (quad[1])+lane_in_quad[1] + accum_n = ((quad >> 1) & 0x1) * kElementsPerPartial * kAccumulatorPatials + + (lane_in_quad & 2); + } else { + accum_m = (((quad & 0x4) >> 1) + (quad & 0x1)) * 8 + lane_in_quad; // (quad[2],quad[0]) + accum_n = ((quad >> 1) & 0x1) * kElementsPerPartial * kAccumulatorPatials; + } + MatrixCoord lane_offset(accum_m, accum_n); + + ref_.add_coord_offset(lane_offset); + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpAccumulatorTileIterator &add_pointer_offset(LongIndex offset) { + ref_.add_pointer_offset(offset); + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpAccumulatorTileIterator &add_tile_offset(TensorCoord const &tile_offset) { + + ref_.add_coord_offset(tile_offset * make_Coord(Shape::kRow, Shape::kColumn)); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpAccumulatorTileIterator & operator++() { + // deliberate no-op + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpAccumulatorTileIterator & operator--() { + // deliberate no-op + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaVoltaTensorOpAccumulatorTileIterator & operator+=(TensorCoord const &tile_offset) { + add_tile_offset(tile_offset); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaVoltaTensorOpAccumulatorTileIterator & operator-=(TensorCoord const &tile_offset) { + add_tile_offset(-tile_offset); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. 
+ CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + load_with_pointer_offset(frag, 0); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_HOST_DEVICE + void load_with_pointer_offset( + Fragment &frag, ///< fragment to load from the tensor + Index pointer_offset) const { ///< loads a tile with a linear offset + + TensorRef offset_ref(ref_); + offset_ref.add_pointer_offset(pointer_offset); + + CUTLASS_PRAGMA_UNROLL + for (int tile_n = 0; tile_n < Policy::TileIterations::kColumn; ++tile_n) { + CUTLASS_PRAGMA_UNROLL + for (int tile_m = 0; tile_m < Policy::TileIterations::kRow; ++tile_m) { + CUTLASS_PRAGMA_UNROLL + for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) { + CUTLASS_PRAGMA_UNROLL + for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) { + + int mma_accum_start = + (((tile_n * Policy::TileIterations::kRow + tile_m) * + Policy::MmaIterations::kColumn + mma_n) * + Policy::MmaIterations::kRow + mma_m) * + kElementsPerMma; + + CUTLASS_PRAGMA_UNROLL + for (int p = 0; p < kAccumulatorPatials; ++p) { + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < EleShapePerPatial::kRow; ++m) { + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < EleShapePerPatial::kColumn; ++n) { + int accum_m = tile_m * Policy::InterleavedTile::kRow + + mma_m * QuadShapePerPatialMma::kRow + m * 2; + int accum_n = tile_n * Policy::InterleavedTile::kColumn + + mma_n * QuadShapePerPatialMma::kColumn + + p * Policy::InterleavedTile::kColumn/2 + n; + int idx = mma_accum_start + p * kElementsPerPartial + + m * EleShapePerPatial::kColumn + n; + frag[idx] = offset_ref.at({accum_m, accum_n}); + } + } + } + } + } + } + } + } + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + Fragment &frag, ///< fragment to load from the tensor + Index byte_offset) const { ///< loads a tile with a linear offset + + load_with_pointer_offset(byte_offset / sizeof(Element)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_HOST_DEVICE + void load( + Fragment &frag, ///< fragment to load from the tensor + TensorCoord const &tile_offset) const { ///< loads a tile with a logical offset in units of whole tiles + + load(frag, tile_offset, 0); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. 
+ CUTLASS_HOST_DEVICE + void load( + Fragment &frag, ///< fragment to load from the tensor + TensorCoord const &tile_offset, ///< loads a tile with a logical offset in units of whole tiles + Index pointer_offset) const { ///< loads a tile with a logical offset AND a pointer offset + + load_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset); + } + + /// Stores a fragment to memory + CUTLASS_HOST_DEVICE + void store(Fragment const &frag) const { + store_with_pointer_offset(frag, 0); + } + + /// Stores a fragment to memory with additional pointer offset + CUTLASS_HOST_DEVICE + void store_with_pointer_offset( + Fragment const &frag, ///< fragment to store from the tensor + Index pointer_offset) const { ///< store a tile with a linear offset + + TensorRef offset_ref(ref_); + offset_ref.add_pointer_offset(pointer_offset); + + CUTLASS_PRAGMA_UNROLL + for (int tile_n = 0; tile_n < Policy::TileIterations::kColumn; ++tile_n) { + CUTLASS_PRAGMA_UNROLL + for (int tile_m = 0; tile_m < Policy::TileIterations::kRow; ++tile_m) { + CUTLASS_PRAGMA_UNROLL + for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) { + CUTLASS_PRAGMA_UNROLL + for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) { + + int mma_accum_start = + (((tile_n * Policy::TileIterations::kRow + tile_m) * + Policy::MmaIterations::kColumn + mma_n) * + Policy::MmaIterations::kRow + mma_m) * + kElementsPerMma; + + CUTLASS_PRAGMA_UNROLL + for (int p = 0; p < kAccumulatorPatials; ++p) { + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < EleShapePerPatial::kRow; ++m) { + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < EleShapePerPatial::kColumn; ++n) { + int accum_m = tile_m * Policy::InterleavedTile::kRow + + mma_m * QuadShapePerPatialMma::kRow + m * 2; + int accum_n = tile_n * Policy::InterleavedTile::kColumn + + mma_n * QuadShapePerPatialMma::kColumn + + p * Policy::InterleavedTile::kColumn/2 + n; + int idx = mma_accum_start + p * kElementsPerPartial + + m * EleShapePerPatial::kColumn + n; + offset_ref.at({accum_m, accum_n}) = frag[idx]; + } + } + } + } + } + } + } + } + + /// Stores a fragment to memory with additional pointer offset + CUTLASS_HOST_DEVICE + void store_with_byte_offset( + Fragment const &frag, ///< fragment to store from the tensor + Index byte_offset) const { ///< store a tile with a linear offset + + store_with_pointer_offset(byte_offset / sizeof(Element)); + } + + /// Stores a fragment to memory with logical offset in units of whole tiles. + CUTLASS_HOST_DEVICE + void store( + Fragment &frag, ///< fragment to store to the tensor + TensorCoord const &tile_offset) const { ///< stores a tile with a logical offset in units of whole tiles + + store(frag, tile_offset, 0); + } + + /// Stores a fragment from memory with logical offset in units of whole tiles. + CUTLASS_HOST_DEVICE + void store( + /// fragment to store to the tensor + Fragment const &frag, + /// stores a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// stores a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + store_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset); + } +}; + +/// This tile iterator is specialized for 32-thread TensorOps. It uses LDS to +/// load from shared memory and therefore must be initialized with a TensorRef +/// to shared memory. 
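Unlike the congruous iterators earlier in this file, the crosswise specialization below keeps an explicit byte offset and k-group counter, has a device-only constructor (it computes a per-lane swizzle), and supports forward traversal only (operator-- asserts). A rough sketch of the intended access pattern, with `CrosswiseIterA`, `sweep_k_dimension`, and `kKGroupsPerTile` as placeholder names:

```cpp
// Sketch only: `CrosswiseIterA` is a placeholder for a concrete instantiation of the
// crosswise specialization below; kKGroupsPerTile comes from the enclosing warp-level MMA.
template <typename CrosswiseIterA, int kKGroupsPerTile>
CUTLASS_DEVICE void sweep_k_dimension(typename CrosswiseIterA::TensorRef ref_A, int lane_id) {

  CrosswiseIterA iter_A(ref_A, lane_id);  // device-only ctor: computes the per-lane swizzled offset
  typename CrosswiseIterA::Fragment frag_A;

  CUTLASS_PRAGMA_UNROLL
  for (int k = 0; k < kKGroupsPerTile; ++k) {
    iter_A.set_kgroup_index(k);  // override the internal k-group counter with a compile-time constant
    iter_A.load(frag_A);         // vectorized LDS; high/low 64-bit halves may be swapped per k-group
    ++iter_A;                    // forward only: operator-- asserts in this specialization
  }
}
```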
+/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: PitchLinearShape) + typename Shape_, + /// Identifies A or B multiplicand + Operand Operand_, + /// Data type of elements + typename Element_, + /// Shape of one matrix product operation (concept: PitchLinearShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions) + int OpDelta_, + /// KBlock size (in units of elements) + int KBlock> +class MmaVoltaTensorOpMultiplicandTileIterator< + Shape_, Operand_, Element_, + cutlass::layout::VoltaTensorOpMultiplicandCrosswise< + sizeof_bits::value, KBlock>, + InstructionShape_, OpDelta_, 32> { + public: + /// Shape of tile to load (concept: PitchLinearShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand_; + + static_assert(kOperand == Operand::kA || kOperand == Operand::kB, + "MmaVoltaTensorOpMultiplicandIterator may only be instantiated for " + "A or B operands to warp-level Mma."); + + /// Element type + using Element = Element_; + + /// KBlock size + static int const kKBlock = KBlock; + + /// Layout of source tile + using Layout = cutlass::layout::VoltaTensorOpMultiplicandCrosswise< + sizeof_bits::value, kKBlock>; + + /// Shape of one matrix product operation (concept: GemmShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: + /// MatrixShape) + static int const kOpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Long Index type + using StrideIndex = typename TensorRef::Layout::Stride::Index; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Internal structure of iterator - made public to enable introspection + struct Policy { + + /// Shape of one individual LDS instruction + using LdsShape = layout::PitchLinearShape<1, 32>; + + /// Number and arrangement of LDSM instructions + using LdsIterations = layout::PitchLinearShape<1, Shape::kStrided / 32>; + + /// Using LDS.128 + static int const kElementsPerAccess = 8; + + /// Contiguous elements per line + static int const kContiguousElementsPerLine = 4; + }; + + private: + /// Not working on this feature at the moment. 
+ static_assert(kOpDelta == 1, + "Alternative arrangements not supported at present."); + + /// Pointer type used for accesses + using AccessType = AlignedArray; + + public: + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = + Array; + + private: + + /// Layout object storing stride values + StrideIndex stride_; + + /// Shared memory base pointers - not advanced + AccessType const *pointer_; + + /// Byte offset incremented as iterator advances + Index byte_offset_; + + /// Crosswised elements are arranged in a SMEM line + /// in units of AccessType + Index line_size; + + /// Internal counter used to determine load addr offset + /// and when to swap higher 64bit with lower 64bit + int k_group_idx_; + + public: + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator() + : pointer_(nullptr), + stride_(0), + line_size(0), + byte_offset_(0), + k_group_idx_(0) {} + + /// Constructor from TensorRef + CUTLASS_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator(TensorRef const &ref, int lane_id) + : pointer_(reinterpret_cast(ref.data())), + stride_(ref.stride(0) * Policy::kElementsPerAccess), + line_size((ref.stride(0) * Policy::kContiguousElementsPerLine) / + Policy::kElementsPerAccess), + k_group_idx_(0), + byte_offset_(0) { + + int quad = (lane_id / 4); + int lane_in_quad = (lane_id % 4); + int access_contiguous; + + if(kOperand == Operand::kA) { + + // swizzle id: tid[4]|tid[1:0]|(tid[2]^tid[4]) + access_contiguous = ((quad & 0x4) << 1) + ((lane_in_quad) << 1) + + ((quad & 0x1) ^ ((quad & 0x4) >> 2)); + } else { + + // swizzle id: tid[4]|tid[1:0]|tid[3] + access_contiguous = ((quad & 0x4) << 1) + (lane_in_quad << 1) + + ((quad & 0x2) >> 1 ^ ((quad & 0x4) >> 2)); + } + + byte_offset_ = access_contiguous * + sizeof(Element) * Policy::kElementsPerAccess; + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { + byte_offset_ += offset * sizeof(Element); + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole + /// tiles + CUTLASS_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator &add_tile_offset( + TensorCoord const &tile_offset) { + + int contiguous_offset = tile_offset.contiguous(); + int strided_offset = tile_offset.strided(); + k_group_idx_ = 0; + + pointer_ += contiguous_offset * + (InstructionShape::kContiguous / + Policy::kContiguousElementsPerLine) * + line_size + + strided_offset * Shape::kStrided / 2; + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator &operator++() { + k_group_idx_ = (k_group_idx_ + 1) % 8; + + if (k_group_idx_ == 4 || k_group_idx_ == 0) { + byte_offset_ ^= 1 * sizeof(Element) * Policy::kElementsPerAccess; + } + + pointer_ += line_size; + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator &operator--() { assert(0); } + + ///< advances in units of whole tiles along the logical coordinate space of + ///< the tensor + CUTLASS_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator &operator+=( + TensorCoord const &tile_offset) { + add_tile_offset(tile_offset); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of + ///< the tensor + CUTLASS_DEVICE + 
MmaVoltaTensorOpMultiplicandTileIterator &operator-=( + TensorCoord const &tile_offset) { + add_tile_offset(-tile_offset); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { load_with_byte_offset(frag, 0); } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset in units of bytes + Index byte_offset) const { + + AccessType * fetch_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < Policy::LdsIterations::kStrided; ++s) { + + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < Policy::LdsIterations::kContiguous; ++c) { + + int access_idx = c + s * Policy::LdsIterations::kContiguous; + + AccessType const *source_ptr = pointer_ + + Policy::LdsShape::kContiguous * c * line_size + + Policy::LdsShape::kStrided * s / 2; + + char const *source_byte_ptr = reinterpret_cast(source_ptr) + byte_offset + byte_offset_; + fetch_ptr[access_idx] = *(reinterpret_cast (source_byte_ptr)); + + // swap higher 64bit and lower 64bit + if (k_group_idx_ & 0x2) { + uint64_t *low = reinterpret_cast(&frag) + access_idx * 2; + uint64_t *high = reinterpret_cast(&frag) + access_idx * 2 + 1; + uint64_t tmp = *low; + *low = *high; + *high = tmp; + } + } + } + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index pointer_offset) const { + load_with_byte_offset(frag, pointer_offset * sizeof(Element)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset) const { + load_with_byte_offset(frag, tile_offset, 0); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index byte_offset) const { + Index pointer_offset = tile_offset.contiguous() * + InstructionShape::kContiguous / + Policy::kElementsPerAccess + + tile_offset.strided() * Shape::kStrided * stride_; + + byte_offset += sizeof(AccessType) * pointer_offset; + + load_with_byte_offset(frag, byte_offset); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. 
+ CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + k_group_idx_ = k_group; + } +}; + +/// This tile iterator is specialized for 32-thread TensorOps. It uses LDS to +/// load from shared memory and therefore must be initialized with a TensorRef +/// to shared memory. +/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Identifies A or B multiplicand + Operand Operand_, + /// Data type of elements + typename Element_, + /// Shape of one matrix product operation (concept: MatrixShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions) + int OpDelta_, + /// KBlock size (in units of elements) + int KBlock> +class MmaVoltaTensorOpMultiplicandTileIterator< + Shape_, Operand_, Element_, + cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCrosswise< + sizeof_bits::value, KBlock>, + InstructionShape_, OpDelta_, 32> { + public: + /// Shape of tile to load (concept: PitchLinearShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand_; + + static_assert(kOperand == Operand::kA || kOperand == Operand::kB, + "MmaTensorOpMultiplicandIterator may only be instantiated for " + "A or B operands to warp-level Mma."); + + /// Element type + using Element = Element_; + + /// KBlock size + static int const kKBlock = KBlock; + + + /// Layout of source tile + using Layout = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCrosswise< + sizeof_bits::value, kKBlock>; + + /// Shape of one matrix product operation (concept: MatrixShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: + /// MatrixShape) + static int const kOpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Underlying tile iterator implementation + using Base = MmaVoltaTensorOpMultiplicandTileIterator< + layout::PitchLinearShape, kOperand, Element, + layout::VoltaTensorOpMultiplicandCrosswise::value, + kKBlock>, + layout::PitchLinearShape, + kOpDelta, kThreads>; + + public: + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = typename Base::Fragment; + + private: + /// Underlying tile iterator + Base iterator_; + + public: + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator() {} + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator(TensorRef const &ref, int lane_id) + : iterator_({ref.data(), ref.stride()}, lane_id) {} + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { + iterator_.add_pointer_offset(offset); + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole + /// tiles + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator &add_tile_offset( + TensorCoord const &tile_offset) { + iterator_.add_tile_offset({tile_offset.row(), 
tile_offset.column()}); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator &operator++() { + ++iterator_; + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator &operator--() { + --iterator_; + + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of + ///< the tensor + CUTLASS_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator &operator+=( + TensorCoord const &tile_offset) { + add_tile_offset(PitchLinearCoord(tile_offset.row(), tile_offset.column())); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of + ///< the tensor + CUTLASS_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator &operator-=( + TensorCoord const &tile_offset) { + add_tile_offset(-PitchLinearCoord(tile_offset.row(), tile_offset.column())); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { iterator_.load(frag); } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index pointer_offset) const { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index byte_offset) const { + iterator_.load_with_byte_offset(frag, byte_offset); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset) const { + assert(0); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + assert(0); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index byte_offset) const { + iterator_.load_with_byte_offset( + frag, {tile_offset.contiguous(), tile_offset.strided()}, byte_offset); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. 
+ CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + iterator_.set_kgroup_index(k_group); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// This tile iterator is specialized for 32-thread TensorOps. It uses LDS to +/// load from shared memory and therefore must be initialized with a TensorRef +/// to shared memory. +/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Identifies A or B multiplicand + Operand Operand_, + /// Data type of elements + typename Element_, + /// Shape of one matrix product operation (concept: MatrixShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions) + int OpDelta_, + /// KBlock size (in units of elements) + int KBlock> +class MmaVoltaTensorOpMultiplicandTileIterator< + Shape_, Operand_, Element_, + cutlass::layout::RowMajorVoltaTensorOpMultiplicandCrosswise< + sizeof_bits::value, KBlock>, + InstructionShape_, OpDelta_, 32> { + public: + /// Shape of tile to load (concept: PitchLinearShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand_; + + static_assert(kOperand == Operand::kA || kOperand == Operand::kB, + "MmaTensorOpMultiplicandIterator may only be instantiated for " + "A or B operands to warp-level Mma."); + + /// Element type + using Element = Element_; + + /// KBlock size + static int const kKBlock = KBlock; + + /// Layout of source tile + using Layout = cutlass::layout::RowMajorVoltaTensorOpMultiplicandCrosswise< + sizeof_bits::value, kKBlock>; + + /// Shape of one matrix product operation (concept: MatrixShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: + /// MatrixShape) + static int const kOpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Underlying tile iterator implementation + using Base = MmaVoltaTensorOpMultiplicandTileIterator< + layout::PitchLinearShape, kOperand, Element, + layout::VoltaTensorOpMultiplicandCrosswise::value, + kKBlock>, + layout::PitchLinearShape, + kOpDelta, kThreads>; + + public: + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = typename Base::Fragment; + + private: + /// Underlying tile iterator + Base iterator_; + + public: + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator() {} + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator(TensorRef const &ref, int lane_id) + : iterator_({ref.data(), ref.stride()}, lane_id) {} + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { + iterator_.add_pointer_offset(offset); + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole + /// tiles + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator 
&add_tile_offset( + TensorCoord const &tile_offset) { + iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()}); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator &operator++() { + ++iterator_; + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator &operator--() { + --iterator_; + + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of + ///< the tensor + CUTLASS_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator &operator+=( + TensorCoord const &tile_offset) { + add_tile_offset(PitchLinearCoord(tile_offset.column(), tile_offset.row())); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of + ///< the tensor + CUTLASS_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator &operator-=( + TensorCoord const &tile_offset) { + add_tile_offset(-PitchLinearCoord(tile_offset.column(), tile_offset.row())); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { iterator_.load(frag); } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index pointer_offset) const { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index byte_offset) const { + iterator_.load_with_byte_offset(frag, byte_offset); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset) const { + assert(0); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + assert(0); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index byte_offset) const { + iterator_.load_with_byte_offset( + frag, {tile_offset.strided(), tile_offset.contiguous()}, byte_offset); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. 
+ CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + iterator_.set_kgroup_index(k_group); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Tile iterator specialized for 'TN' arrangement +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Operand identity + Operand Operand_, + /// Data type of A elements + typename Element_, + /// Layout of matrix operand + typename Layout_, + /// Shape of one matrix production operation (concept: MatrixShape) + typename InstructionShape_, + /// Delta between *MMA operations (in units of *MMA operations, concept: + /// MatrixShape) + int OpDelta_, + /// Number of threads participating in one matrix operation + int Threads = 32, + /// Number of partitions along K dimension + int PartitionsK_ = 1> +class MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner { + public: + + /// Shape of tile to load (concept: MatrixShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand_; + + /// Basic check + static_assert(kOperand == Operand::kA || kOperand== Operand::kB, + "MmaVoltaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma."); + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = Layout_; + + /// Shape of one matrix product operation (concept: MatrixShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) + static int const kOpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Number of elements accessed per Shared Memory load + static int const kElementsPerAccess = 4; + +private: + + static int const kInterleavedTileRows = 32; + static int const kInterleavedTileColumns = 32; + static int const kInstructionsPerTile = 2; + + /// Rounded up instruction counts + using TileCount = MatrixShape< + Shape::kRow / kInterleavedTileRows, + Shape::kColumn / kInterleavedTileColumns + >; + + using FragmentCount = MatrixShape< + TileCount::kRow * kInstructionsPerTile, + TileCount::kColumn * kInstructionsPerTile + >; + +public: + + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = Array< + Element, + (kOperand == Operand::kA ? 
FragmentCount::kRow : FragmentCount::kColumn) * kElementsPerAccess + >; + + /// Memory access type + using AccessType = AlignedArray; + +private: + + /// Underlying tensor reference + TensorRef ref_; + + /// Extent of tensor + MatrixCoord extent_; + + /// Origin + MatrixCoord origin_; + + /// Used to conditionally enable extents checking + bool divisible_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner(): divisible_(true) { } + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner( + TensorRef const &ref, + int lane_id + ): + ref_(ref), extent_(Shape::kRow, Shape::kColumn), divisible_(true) { + + int quad_id = lane_id / 4; + int lane_in_quad = (lane_id % 4); + + if (kOperand == Operand::kA) { + + int row_idx = ((quad_id & 1) + ((quad_id & 4) / 2)) * 4 * kInstructionsPerTile + lane_in_quad; + int col_idx = 0; + + origin_ = MatrixCoord(row_idx, col_idx); + } + else { + + int row_idx = 0; + int col_idx = (quad_id / 2) * 4 * kInstructionsPerTile + lane_in_quad; + + origin_ = MatrixCoord(row_idx, col_idx); + } + + ref_.add_coord_offset(origin_); + } + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner( + TensorRef const &ref, + TensorCoord extent, + int lane_id + ): ref_(ref), extent_(extent), divisible_(false) { + + int quad_id = lane_id / 4; + int lane_in_quad = (lane_id % 4); + + if (kOperand == Operand::kA) { + + int row_idx = ((quad_id & 1) + ((quad_id & 4) / 2)) * 4 * kInstructionsPerTile + lane_in_quad; + int col_idx = 0; + + origin_ = MatrixCoord(row_idx, col_idx); + } + else { + + int row_idx = 0; + int col_idx = (quad_id / 2) * 4 * kInstructionsPerTile + lane_in_quad; + + origin_ = MatrixCoord(row_idx, col_idx); + } + + #if defined(__CUDA_ARCH__) + __syncthreads(); + #endif + + ref_.add_coord_offset(origin_); + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner &add_pointer_offset(LongIndex offset) { + + ref_.add_pointer_offset(offset); + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner &add_tile_offset(TensorCoord const &tile_offset) { + + TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn); + origin_ += coord_offset; + + ref_.add_coord_offset(coord_offset); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_DEVICE + MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner & operator++() { + + if (kOperand == Operand::kA) { + add_tile_offset({0, 1}); + } + else { + add_tile_offset({1, 0}); + } + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner & operator--() { + + if (kOperand == Operand::kA) { + add_tile_offset({0, -1}); + } + else { + add_tile_offset({-1, 0}); + } + + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner & operator+=(TensorCoord const &tile_offset) { + add_tile_offset(tile_offset); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + 
CUTLASS_DEVICE + MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner & operator-=(TensorCoord const &tile_offset) { + add_tile_offset(-tile_offset); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + + load_with_pointer_offset(frag, 0); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index pointer_offset) const { + + AccessType *frag_ptr = reinterpret_cast(&frag); + AccessType const *access_ptr = reinterpret_cast(ref_.data()); + int ldm = ref_.stride()[0]; + + if (kOperand == Operand::kA) { + + CUTLASS_PRAGMA_UNROLL + for (int idx = 0; idx < FragmentCount::kRow; ++idx) { + + int tile_idx = idx / 2; + int quad_idx = idx % 2; + + int row_offset = tile_idx * kInterleavedTileRows + quad_idx * 4; + frag_ptr[idx] = access_ptr[row_offset * ldm / kElementsPerAccess]; + } + } + else { + CUTLASS_PRAGMA_UNROLL + for (int idx = 0; idx < FragmentCount::kColumn; ++idx) { + + int tile_idx = idx / 2; + int quad_idx = idx % 2; + + int col_offset = tile_idx * kInterleavedTileColumns + quad_idx * 4; + frag_ptr[idx] = access_ptr[col_offset * ldm / kElementsPerAccess]; + } + } + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index byte_offset) const { + + load_with_pointer_offset(frag, byte_offset * 8 / sizeof_bits::value); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset) const { + + TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn); + + load_with_pointer_offset(frag, ref_.offset(coord_offset)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + + TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn); + + load_with_pointer_offset(frag, ref_.offset(coord_offset) + pointer_offset); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index byte_offset) const { + + TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn); + + load_with_pointer_offset(frag, ref_.offset(coord_offset) + byte_offset * 8 / sizeof_bits::value); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. 
+ /// + /// This is used by some nontrivial permuted layouts. + CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + // no operation + } +}; + + +/// Tile iterator specialized for 'NT' arrangement +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Operand identity + Operand Operand_, + /// Data type of A elements + typename Element_, + /// Layout of matrix operand + typename Layout_, + /// Shape of one matrix production operation (concept: MatrixShape) + typename InstructionShape_, + /// Delta between *MMA operations (in units of *MMA operations, concept: + /// MatrixShape) + int OpDelta_, + /// Number of threads participating in one matrix operation + int Threads = 32, + /// Number of partitions along K dimension + int PartitionsK_ = 1> +class MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter { + public: + + /// Shape of tile to load (concept: MatrixShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand_; + + /// Basic check + static_assert(kOperand == Operand::kA || kOperand== Operand::kB, + "MmaVoltaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma."); + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = Layout_; + + /// Shape of one matrix product operation (concept: MatrixShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) + static int const kOpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Number of elements accessed per Shared Memory load + static int const kElementsPerAccess = 4; + +private: + + static int const kInterleavedTileRows = 32; + static int const kInterleavedTileColumns = 32; + static int const kInstructionsPerTile = 2; + + /// Rounded up instruction counts + using TileCount = MatrixShape< + Shape::kRow / kInterleavedTileRows, + Shape::kColumn / kInterleavedTileColumns + >; + + using FragmentCount = MatrixShape< + TileCount::kRow * kInstructionsPerTile, + TileCount::kColumn * kInstructionsPerTile + >; + +public: + + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = Array< + Element, + (kOperand == Operand::kA ? 
FragmentCount::kRow : FragmentCount::kColumn) * kElementsPerAccess + >; + + /// Memory access type + using AccessType = AlignedArray; + +private: + + /// Underlying tensor reference + TensorRef ref_; + + /// Extent of tensor + MatrixCoord extent_; + + /// Origin + MatrixCoord origin_; + + /// Used to conditionally enable extents checking + bool divisible_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter(): divisible_(true) { } + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter( + TensorRef const &ref, + int lane_id + ): + ref_(ref), extent_(Shape::kRow, Shape::kColumn), divisible_(true) { + + int quad_id = lane_id / 4; + int lane_in_quad = (lane_id % 4); + + if (kOperand == Operand::kA) { + + int row_idx = ((quad_id & 1) + ((quad_id & 4) / 2)) * 4 * kInstructionsPerTile; + int col_idx = lane_in_quad; + + origin_ = MatrixCoord(row_idx, col_idx); + } + else { + + int row_idx = lane_in_quad; + int col_idx = (quad_id / 2) * 4 * kInstructionsPerTile; + + origin_ = MatrixCoord(row_idx, col_idx); + } + + ref_.add_coord_offset(origin_); + } + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter( + TensorRef const &ref, + TensorCoord extent, + int lane_id + ): ref_(ref), extent_(extent), divisible_(false) { + + int quad_id = lane_id / 4; + int lane_in_quad = (lane_id % 4); + + if (kOperand == Operand::kA) { + + int row_idx = ((quad_id & 1) + ((quad_id & 4) / 2)) * 4 * kInstructionsPerTile; + int col_idx = lane_in_quad; + + origin_ = MatrixCoord(row_idx, col_idx); + } + else { + + int row_idx = lane_in_quad; + int col_idx = (quad_id / 2) * 4 * kInstructionsPerTile; + + origin_ = MatrixCoord(row_idx, col_idx); + } + + #if defined(__CUDA_ARCH__) + __syncthreads(); + #endif + + ref_.add_coord_offset(origin_); + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter &add_pointer_offset(LongIndex offset) { + + ref_.add_pointer_offset(offset); + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter &add_tile_offset(TensorCoord const &tile_offset) { + + TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn); + origin_ += coord_offset; + + ref_.add_coord_offset(coord_offset); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_DEVICE + MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter & operator++() { + + if (kOperand == Operand::kA) { + add_tile_offset({0, 1}); + } + else { + add_tile_offset({1, 0}); + } + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter & operator--() { + + if (kOperand == Operand::kA) { + add_tile_offset({0, -1}); + } + else { + add_tile_offset({-1, 0}); + } + + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter & operator+=(TensorCoord const &tile_offset) { + add_tile_offset(tile_offset); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + 
MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter & operator-=(TensorCoord const &tile_offset) { + add_tile_offset(-tile_offset); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + + load_with_pointer_offset(frag, 0); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index pointer_offset) const { + + AccessType *frag_ptr = reinterpret_cast(&frag); + AccessType const *access_ptr = reinterpret_cast(ref_.data()); + int ldm = ref_.stride()[0]; + + if (kOperand == Operand::kA) { + + CUTLASS_PRAGMA_UNROLL + for (int idx = 0; idx < FragmentCount::kRow; ++idx) { + + int tile_idx = idx / 2; + int quad_idx = idx % 2; + + int row_offset = tile_idx * kInterleavedTileRows; + frag_ptr[idx] = access_ptr[row_offset / kElementsPerAccess + quad_idx]; + } + } + else { + CUTLASS_PRAGMA_UNROLL + for (int idx = 0; idx < FragmentCount::kColumn; ++idx) { + + int tile_idx = idx / 2; + int quad_idx = idx % 2; + + int col_offset = tile_idx * kInterleavedTileColumns; + frag_ptr[idx] = access_ptr[col_offset / kElementsPerAccess + quad_idx]; + } + } + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index byte_offset) const { + + load_with_pointer_offset(frag, byte_offset * 8 / sizeof_bits::value); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset) const { + + TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn); + + load_with_pointer_offset(frag, ref_.offset(coord_offset)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + + TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn); + + load_with_pointer_offset(frag, ref_.offset(coord_offset) + pointer_offset); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index byte_offset) const { + + TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn); + + load_with_pointer_offset(frag, ref_.offset(coord_offset) + byte_offset * 8 / sizeof_bits::value); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. 
+ /// + /// This is used by some nontrivial permuted layouts. + CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + // no operation + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Data type of elements + typename Element_, + /// Shape of one matrix product operation (concept: MatrixShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions) + int OpDelta_> +class MmaVoltaTensorOpMultiplicandTileIterator< + Shape_, + Operand::kA, + Element_, + cutlass::layout::RowMajor, + InstructionShape_, + OpDelta_, + 32 +> : public MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner< + Shape_, Operand::kA, Element_, cutlass::layout::RowMajor, InstructionShape_, OpDelta_> { + +public: + using Base = MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner< + Shape_, Operand::kA, Element_, cutlass::layout::RowMajor, InstructionShape_, OpDelta_> ; + + using TensorRef = typename Base::TensorRef; + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator( + TensorRef const &ref, + int lane_id + ): Base(ref, lane_id) { } + +}; + +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Data type of elements + typename Element_, + /// Shape of one matrix product operation (concept: MatrixShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions) + int OpDelta_> +class MmaVoltaTensorOpMultiplicandTileIterator< + Shape_, + Operand::kA, + Element_, + cutlass::layout::ColumnMajor, + InstructionShape_, + OpDelta_, + 32 +> : public MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter< + Shape_, Operand::kA, Element_, cutlass::layout::ColumnMajor, InstructionShape_, OpDelta_> { + +public: + using Base = MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter< + Shape_, Operand::kA, Element_, cutlass::layout::ColumnMajor, InstructionShape_, OpDelta_> ; + + using TensorRef = typename Base::TensorRef; + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator( + TensorRef const &ref, + int lane_id + ): Base(ref, lane_id) { } + +}; + +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Data type of elements + typename Element_, + /// Shape of one matrix product operation (concept: MatrixShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions) + int OpDelta_> +class MmaVoltaTensorOpMultiplicandTileIterator< + Shape_, Operand::kB, Element_, + cutlass::layout::ColumnMajor, + InstructionShape_, OpDelta_, 32 +> : public MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner< + Shape_, Operand::kB, Element_, cutlass::layout::ColumnMajor, InstructionShape_, OpDelta_> { + +public: + using Base = MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner< + Shape_, Operand::kB, Element_, cutlass::layout::ColumnMajor, InstructionShape_, OpDelta_>; + + using TensorRef = typename Base::TensorRef; + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator( + TensorRef const &ref, + int lane_id + ): Base(ref, lane_id) { } +}; + +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Data type of elements + typename Element_, + /// Shape of one matrix 
product operation (concept: MatrixShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions) + int OpDelta_> +class MmaVoltaTensorOpMultiplicandTileIterator< + Shape_, Operand::kB, Element_, + cutlass::layout::RowMajor, + InstructionShape_, OpDelta_, 32 +> : public MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter< + Shape_, Operand::kB, Element_, cutlass::layout::RowMajor, InstructionShape_, OpDelta_> { + +public: + using Base = MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter< + Shape_, Operand::kB, Element_, cutlass::layout::RowMajor, InstructionShape_, OpDelta_>; + + using TensorRef = typename Base::TensorRef; + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaVoltaTensorOpMultiplicandTileIterator( + TensorRef const &ref, + int lane_id + ): Base(ref, lane_id) { } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace warp +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h new file mode 100644 index 0000000000000000000000000000000000000000..beeff238305162c2e18bddc6b04062e6a3b34017 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h @@ -0,0 +1,2441 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Defines iterators used by warp-level matrix multiply operations targeting Tensor Cores. +*/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/array.h" +#include "cutlass/numeric_types.h" +#include "cutlass/tensor_ref.h" +#include "cutlass/matrix_shape.h" + +#include "cutlass/arch/memory_sm75.h" +#include "cutlass/gemm/gemm.h" + +#include "cutlass/layout/matrix.h" +#include "cutlass/layout/tensor.h" +#include "cutlass/layout/pitch_linear.h" +#include "cutlass/layout/tensor_op_multiplicand_sm80.h" + +#include "cutlass/platform/platform.h" +#include "cutlass/fast_math.h" + +#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace warp { + +//////////////////////////////////////////////////////////////////////////////// + +/// This tile iterator is specialized for loading 128b vectors of 64b elements. +/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: PitchLinearShape) + typename Shape_, + /// Identifies A or B multiplicand + Operand Operand_, + /// Data type of elements + typename Element_, + /// Shape of one matrix product operation (concept: PitchLinearShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions) + int OpDelta_, + /// Number of partitions along K dimension + int PartitionsK_> +class MmaTensorOpMultiplicandTileIterator< + Shape_, Operand_, Element_, + cutlass::layout::TensorOpMultiplicandCongruous64b, + InstructionShape_, OpDelta_, 32, PartitionsK_> { + public: + + /// Shape of tile to load (concept: PitchLinearShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand_; + + static_assert(kOperand == Operand::kA || kOperand== Operand::kB, + "MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma."); + + static_assert(!(Shape::kContiguous % 16) && !(Shape::kStrided % 4), "Divisibility."); + + static_assert(sizeof_bits::value == 64, "This is specialized for 64b accesses."); + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = cutlass::layout::TensorOpMultiplicandCongruous64b; + + /// Shape of one matrix product operation (concept: GemmShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) + static int const kOpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// Number of partitions along K dimension + static int const kPartitionsK = PartitionsK_; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Long Index type + using StrideIndex = typename TensorRef::Layout::Stride::Index; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Load two elements per access + static int const kElementsPerAccess = 2; + + /// Policy defining internal details of tile iterator + struct Policy { + + /// Shape of one access + using Delta = layout::PitchLinearShape<8, 4>; + + /// Number of iterations to load + using Iterations = layout::PitchLinearShape< + Shape::kContiguous / kElementsPerAccess / 
Delta::kContiguous, + InstructionShape::kStrided / Delta::kStrided + >; + + }; + +private: + + /// Not working on this feature at the moment. + static_assert(kOpDelta == 1, + "Alternative arrangements not supported at present."); + + /// Pointer type used for accesses + using AccessType = AlignedArray; + + /// Internal counter used to jump to next K partition + int k_group_idx_; + +public: + + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = + Array; + +private: + + /// Layout object storing stride values + StrideIndex stride_; + + /// Shared memory base pointers - not advanced + AccessType const *pointer_; + + /// Byte offset incremented as iterator advances + Index byte_offset_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0) { } + + /// Constructor from TensorRef + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator( + TensorRef const &ref, + int lane_id + ): + stride_(ref.stride(0) / kElementsPerAccess), byte_offset_(0), + k_group_idx_(0) { + + int access_strided = lane_id / Policy::Delta::kContiguous; + int access_contiguous = (lane_id % Policy::Delta::kContiguous) ^ access_strided; + + pointer_= reinterpret_cast(ref.data()) + + access_contiguous + access_strided * stride_; + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { + + byte_offset_ += offset * sizeof(Element); + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { + + int offset = + (tile_offset.strided() * InstructionShape::kStrided) * stride_ * kElementsPerAccess + + tile_offset.contiguous() * Shape::kContiguous; + + add_pointer_offset(offset); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator++() { + + add_tile_offset({0, 1}); + + return *this; + } + + /// Advances the iterator along the opposite of the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator & operator--() { + + add_tile_offset({0, -1}); + + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { + add_tile_offset(tile_offset); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { + add_tile_offset(-tile_offset); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. 
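+  /// Illustrative usage sketch (not part of the original header; names are
+  /// hypothetical): a warp-level mainloop typically constructs this iterator
+  /// over shared memory, loads a fragment, and advances along the K dimension,
+  /// e.g.
+  ///
+  ///   Fragment frag;
+  ///   Iterator iter(shared_memory_ref, lane_id);
+  ///   iter.load(frag);   // fetch this thread's portion of the tile
+  ///   ++iter;            // advance one tile along the advance (K) dimension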
+ CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + + load_with_byte_offset(frag, 0); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset in units of bytes + Index byte_offset) const { + + AccessType *fetch_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < Policy::Iterations::kStrided; ++s) { + + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < Policy::Iterations::kContiguous; ++c) { + + int access_idx = c + s * Policy::Iterations::kContiguous; + + AccessType const *source_ptr = pointer_ + + Policy::Delta::kContiguous * c + + Policy::Delta::kStrided * s * stride_; + + char const *source_byte_ptr = reinterpret_cast(source_ptr) + byte_offset + byte_offset_; + + AccessType const *source = reinterpret_cast(source_byte_ptr); + + fetch_ptr[access_idx] = *source; + } + } + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index pointer_offset) const { + + load_with_byte_offset(frag, pointer_offset * sizeof(Element)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset) const { + + load_with_byte_offset(frag, tile_offset, 0); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + + load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index byte_offset) const { + + Index pointer_offset = + tile_offset.contiguous() * Shape::kContiguous / Layout::kElementsPerAccess + + tile_offset.strided() * InstructionShape::kStrided * stride_; + + byte_offset += sizeof(AccessType) * pointer_offset; + + load_with_byte_offset(frag, byte_offset); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. 
+ CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Identifies A or B multiplicand + Operand Operand_, + /// Data type of elements + typename Element_, + /// Shape of one matrix product operation (concept: MatrixShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions) + int OpDelta_, + /// Number of partitions along K dimension + int PartitionsK_> +class MmaTensorOpMultiplicandTileIterator< + Shape_, Operand_, Element_, + cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b, + InstructionShape_, OpDelta_, 32, PartitionsK_> { + public: + + /// Shape of tile to load (concept: PitchLinearShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand_; + + static_assert(kOperand == Operand::kA || kOperand== Operand::kB, + "MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma."); + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b; + + /// Shape of one matrix product operation (concept: MatrixShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) + static int const kOpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Underlying tile iterator implementation + using Base = MmaTensorOpMultiplicandTileIterator< + layout::PitchLinearShape, kOperand, Element, + layout::TensorOpMultiplicandCongruous64b, + layout::PitchLinearShape, + kOpDelta, kThreads, PartitionsK_>; + + public: + + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = typename Base::Fragment; + +private: + + /// Underlying tile iterator + Base iterator_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator() { } + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator( + TensorRef const &ref, + int lane_id + ): iterator_({ref.data(), ref.stride()}, lane_id) { + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { + + iterator_.add_pointer_offset(offset); + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { + + iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()}); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator & operator++() { + + ++iterator_; + + return *this; + } + + /// Advances the iterator along the advance 
dimension + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator & operator--() { + + --iterator_; + + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { + add_tile_offset(PitchLinearCoord(tile_offset.column(), tile_offset.row())); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { + add_tile_offset(-PitchLinearCoord(tile_offset.column(), tile_offset.row())); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + + iterator_.load(frag); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index pointer_offset) const { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index byte_offset) const { + iterator_.load_with_byte_offset(frag, byte_offset); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset) const { + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index byte_offset) const { + iterator_.load_with_byte_offset( + frag, + {tile_offset.strided(), tile_offset.contiguous()}, + byte_offset); + } + + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. + CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + iterator_.set_kgroup_index(k_group); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// This tile iterator is specialized for 32-thread TensorOps. It uses LDSM to load from shared +/// memory and therefore must be initialized with a TensorRef to shared memory. 
+/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Identifies A or B multiplicand + Operand Operand_, + /// Data type of elements + typename Element_, + /// Shape of one matrix product operation (concept: MatrixShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions) + int OpDelta_, + /// Number of partitions along K dimension + int PartitionsK_> +class MmaTensorOpMultiplicandTileIterator< + Shape_, Operand_, Element_, + cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b, + InstructionShape_, OpDelta_, 32, PartitionsK_> { + public: + + /// Shape of tile to load (concept: PitchLinearShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand_; + + static_assert(kOperand == Operand::kA || kOperand== Operand::kB, + "MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma."); + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b; + + /// Shape of one matrix product operation (concept: MatrixShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) + static int const kOpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Underlying tile iterator implementation + using Base = MmaTensorOpMultiplicandTileIterator< + layout::PitchLinearShape, kOperand, Element, + layout::TensorOpMultiplicandCongruous64b, + layout::PitchLinearShape, + kOpDelta, kThreads, PartitionsK_>; + + public: + + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = typename Base::Fragment; + +private: + + /// Underlying tile iterator + Base iterator_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator() { } + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator( + TensorRef const &ref, + int lane_id + ): iterator_({ref.data(), ref.stride()}, lane_id) { + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { + + iterator_.add_pointer_offset(offset); + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { + + iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()}); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator & operator++() { + + ++iterator_; + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator & operator--() { + + --iterator_; + + return *this; + } + + ///< advances in 
units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { + add_tile_offset(PitchLinearCoord(tile_offset.row(), tile_offset.column())); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { + add_tile_offset(-PitchLinearCoord(tile_offset.row(), tile_offset.column())); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + + iterator_.load(frag); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index pointer_offset) const { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index byte_offset) const { + iterator_.load_with_byte_offset(frag, byte_offset); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset) const { + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index byte_offset) const { + iterator_.load_with_byte_offset( + frag, + {tile_offset.contiguous(), tile_offset.strided()}, + byte_offset); + } + + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. + CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + iterator_.set_kgroup_index(k_group); + } +}; + +//////////////////////////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////////////////////////// + +/// This tile iterator is specialized for loading 128b vectors of 64b elements. 
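+/// (Illustrative note, not in the original header: with 64b elements such as
+/// double, each 128b access covers kElementsPerAccess = 2 elements, which is
+/// what the static_assert on sizeof_bits<Element>::value below enforces.)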
+/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: PitchLinearShape) + typename Shape_, + /// Identifies A or B multiplicand + Operand Operand_, + /// Data type of elements + typename Element_, + /// Shape of one matrix product operation (concept: PitchLinearShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions) + int OpDelta_, + /// Number of partitions along K dimension + int PartitionsK_> +class MmaTensorOpMultiplicandTileIterator< + Shape_, Operand_, Element_, + cutlass::layout::TensorOpMultiplicand64bCrosswise, + InstructionShape_, OpDelta_, 32, PartitionsK_> { + public: + + /// Shape of tile to load (concept: PitchLinearShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand_; + + static_assert(kOperand == Operand::kA || kOperand== Operand::kB, + "MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma."); + + static_assert(!(Shape::kContiguous % 4) && !(Shape::kStrided % 16), "Divisibility."); + + static_assert(sizeof_bits::value == 64, "This is specialized for 64b accesses."); + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = cutlass::layout::TensorOpMultiplicand64bCrosswise; + + /// Shape of one matrix product operation (concept: GemmShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) + static int const kOpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// Number of partitions along K dimension + static int const kPartitionsK = PartitionsK_; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Long Index type + using StrideIndex = typename TensorRef::Layout::Stride::Index; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Load two elements per access + static int const kElementsPerAccess = 2; + + /// Policy defining internal details of tile iterator + struct Policy { + + /// Shape of one access + using Delta = layout::PitchLinearShape<4, 16>; + + /// Number of iterations to load + using Iterations = layout::PitchLinearShape< + InstructionShape::kContiguous / Delta::kContiguous, + Shape::kStrided / Delta::kStrided + >; + + }; + +private: + + /// Not working on this feature at the moment. 
+ static_assert(kOpDelta == 1, + "Alternative arrangements not supported at present."); + + /// Pointer type used for accesses + using AccessType = AlignedArray; + +public: + + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = + Array; + +private: + + /// Layout object storing stride values + StrideIndex stride_; + + /// Shared memory base pointers - not advanced + AccessType const *pointer_; + + /// Byte offset incremented as iterator advances + Index byte_offset_; + + /// Internal counter for tracking K-group + Index k_group_idx_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0) { } + + /// Constructor from TensorRef + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator( + TensorRef const &ref, + int lane_id + ): + stride_(ref.stride(0) / kElementsPerAccess), byte_offset_(0), + k_group_idx_(0) { + + int access_strided = lane_id / 8; + int access_contiguous = (lane_id % 8); + + byte_offset_ = (access_contiguous + access_strided * stride_) * sizeof(AccessType); + + pointer_= reinterpret_cast(ref.data()); + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { + + pointer_ += offset / kElementsPerAccess; + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { + int offset = (tile_offset.contiguous() * InstructionShape::kContiguous) * + stride_ * kElementsPerAccess + + tile_offset.strided() * Shape::kStrided; + + add_pointer_offset(offset); + + int old_k_group_idx = k_group_idx_; + + k_group_idx_ += tile_offset.contiguous(); + + if ((k_group_idx_ & 2) ^ (old_k_group_idx & 2)) { + byte_offset_ ^= 0x40; + } + + return *this; + } + + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator &add_tile_offset_negative(TensorCoord const &tile_offset) { + + // TODO: fix this if it becomes an issue during warp it reset + add_tile_offset(tile_offset); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator++() { + + pointer_ += stride_ * InstructionShape::kContiguous; + + if (k_group_idx_ & 0x1) { + // xor ptr + byte_offset_ ^= 0x40; + } + + ++k_group_idx_; + + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { + add_tile_offset(tile_offset); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. 
+ CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + + load_with_byte_offset(frag, 0); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset in units of bytes + Index byte_offset) const { + + AccessType *fetch_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < Policy::Iterations::kContiguous; ++c) { + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < Policy::Iterations::kStrided; ++s) { + + int access_idx = c + s * Policy::Iterations::kContiguous; + + AccessType const *source_ptr = pointer_ + + Policy::Delta::kContiguous * c * stride_ + + Policy::Delta::kStrided * s / kElementsPerAccess; + + char const *source_byte_ptr = reinterpret_cast(source_ptr) + byte_offset + byte_offset_; + + AccessType const *source = reinterpret_cast(source_byte_ptr); + + fetch_ptr[access_idx] = *source; + } + } + + Element *exchange_ptr = reinterpret_cast(&frag); + + if (k_group_idx_ & 1) { + // exchange on 64b granularity + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < Fragment::kElements; i += 2) { + Element tmp = exchange_ptr[i]; + exchange_ptr[i] = exchange_ptr[i + 1]; + exchange_ptr[i + 1] = tmp; + } + } + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index pointer_offset) const { + + load_with_byte_offset(frag, pointer_offset * sizeof(Element)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset) const { + + load_with_byte_offset(frag, tile_offset, 0); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + + load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index byte_offset) const { + Index pointer_offset = tile_offset.contiguous() * + InstructionShape::kContiguous / + Layout::kElementsPerAccess + + tile_offset.strided() * Shape::kStrided * stride_; + + byte_offset += sizeof(AccessType) * pointer_offset; + + load_with_byte_offset(frag, byte_offset); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. 
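+  /// Illustrative usage sketch (not part of the original header; names are
+  /// hypothetical): pipelined mainloops typically pin the k-group to a value
+  /// the compiler can fold at each warp MMA step, e.g.
+  ///
+  ///   warp_tile_iterator_B.set_kgroup_index((warp_mma_k + 1) % kWarpGemmIterations);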
+ CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + k_group_idx_ = k_group; + } +}; + +//////////////////////////////////////////////////////////////////////////////// +/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Identifies A or B multiplicand + Operand Operand_, + /// Data type of elements + typename Element_, + /// Shape of one matrix product operation (concept: MatrixShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions) + int OpDelta_, + /// Number of partitions along K dimension + int PartitionsK_> +class MmaTensorOpMultiplicandTileIterator< + Shape_, Operand_, Element_, + cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise, + InstructionShape_, OpDelta_, 32, PartitionsK_> { + public: + + /// Shape of tile to load (concept: PitchLinearShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand_; + + static_assert(kOperand == Operand::kA || kOperand== Operand::kB, + "MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma."); + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise; + + /// Shape of one matrix product operation (concept: MatrixShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) + static int const kOpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Underlying tile iterator implementation + using Base = MmaTensorOpMultiplicandTileIterator< + layout::PitchLinearShape, kOperand, Element, + layout::TensorOpMultiplicand64bCrosswise, + layout::PitchLinearShape, + kOpDelta, kThreads, PartitionsK_>; + + public: + + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = typename Base::Fragment; + +private: + + /// Underlying tile iterator + Base iterator_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator() { } + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator( + TensorRef const &ref, + int lane_id + ): iterator_({ref.data(), ref.stride()}, lane_id) { + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { + + iterator_.add_pointer_offset(offset); + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { + + iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()}); + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &add_tile_offset_negative(TensorCoord const 
&tile_offset) { + + iterator_.add_tile_offset_negative({tile_offset.column(), tile_offset.row()}); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator & operator++() { + + ++iterator_; + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator & operator--() { + + --iterator_; + + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { + add_tile_offset(PitchLinearCoord(tile_offset.column(), tile_offset.row())); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { + add_tile_offset(-PitchLinearCoord(tile_offset.column(), tile_offset.row())); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + + iterator_.load(frag); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index pointer_offset) const { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index byte_offset) const { + iterator_.load_with_byte_offset(frag, byte_offset); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset) const { + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index byte_offset) const { + iterator_.load_with_byte_offset( + frag, + {tile_offset.strided(), tile_offset.contiguous()}, + byte_offset); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. 
+ CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + iterator_.set_kgroup_index(k_group); + } +}; + +//////////////////////////////////////////////////////////////////////////////// +/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Identifies A or B multiplicand + Operand Operand_, + /// Data type of elements + typename Element_, + /// Shape of one matrix product operation (concept: MatrixShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions) + int OpDelta_, + /// Number of partitions along K dimension + int PartitionsK_> +class MmaTensorOpMultiplicandTileIterator< + Shape_, Operand_, Element_, + cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise, + InstructionShape_, OpDelta_, 32, PartitionsK_> { + public: + + /// Shape of tile to load (concept: PitchLinearShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand_; + + static_assert(kOperand == Operand::kA || kOperand== Operand::kB, + "MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma."); + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise; + + /// Shape of one matrix product operation (concept: MatrixShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) + static int const kOpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Underlying tile iterator implementation + using Base = MmaTensorOpMultiplicandTileIterator< + layout::PitchLinearShape, kOperand, Element, + layout::TensorOpMultiplicand64bCrosswise, + layout::PitchLinearShape, + kOpDelta, kThreads, PartitionsK_>; + + public: + + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = typename Base::Fragment; + +private: + + /// Underlying tile iterator + Base iterator_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator() { } + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator( + TensorRef const &ref, + int lane_id + ): iterator_({ref.data(), ref.stride()}, lane_id) { + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { + + iterator_.add_pointer_offset(offset); + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { + + iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()}); + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &add_tile_offset_negative(TensorCoord 
const &tile_offset) { + + iterator_.add_tile_offset_negative({tile_offset.row(), tile_offset.column()}); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator & operator++() { + + ++iterator_; + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator & operator--() { + + --iterator_; + + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { + add_tile_offset(PitchLinearCoord(tile_offset.row(), tile_offset.column())); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { + add_tile_offset(-PitchLinearCoord(tile_offset.row(), tile_offset.column())); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + + iterator_.load(frag); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index pointer_offset) const { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index byte_offset) const { + iterator_.load_with_byte_offset(frag, byte_offset); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset) const { + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index byte_offset) const { + iterator_.load_with_byte_offset( + frag, + {tile_offset.contiguous(), tile_offset.strided()}, + byte_offset); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. 
+ CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + iterator_.set_kgroup_index(k_group); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + + +/// Tile iterator specialized for canonical matrix layouts +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Operand identity + Operand Operand_, + /// Data type of A elements + typename Element_, + /// Layout of operand + typename Layout_, + /// Shape of one matrix production operation (concept: MatrixShape) + typename InstructionShape_, + /// Delta between *MMA operations (in units of *MMA operations, concept: + /// MatrixShape) + int OpDelta_, + /// Number of threads participating in one matrix operation + int Threads = 32, + /// Number of partitions along K dimension + int PartitionsK_ = 1> +class MmaTensorOpMultiplicandTileIteratorCanonical { + public: + + /// Shape of tile to load (concept: MatrixShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand_; + + /// Basic check + static_assert(kOperand == Operand::kA || kOperand== Operand::kB, + "MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma."); + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = Layout_; + + /// Shape of one matrix product operation (concept: MatrixShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) + static int const kOpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Number of elements accessed per Shared Memory load + static int const kElementsPerAccess = + (sizeof_bits::value >= 32 ? 1 : 32 / sizeof_bits::value); + +private: + + static int const kWarpShapeOuter = + (kOperand == Operand::kA ? Shape::kRow : Shape::kColumn); + + static int const kWarpShapeInner = + (kOperand == Operand::kA ? 
Shape::kColumn : Shape::kRow); + + + /// Rounded up instruction counts + using InstructionCount = MatrixShape< + Shape::kRow / InstructionShape::kRow, + Shape::kColumn / InstructionShape::kColumn + >; + + /// Rounded up tile dimensions + using WarpShapeDivisible = MatrixShape< + InstructionCount::kRow * InstructionShape::kRow, + InstructionCount::kColumn * InstructionShape::kColumn + >; + +public: + + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = Array< + Element, + WarpShapeDivisible::kRow * WarpShapeDivisible::kColumn / kThreads + >; + + /// Memory access type + using AccessType = AlignedArray; + +private: + + /// Underlying tensor reference + TensorRef ref_; + + /// Extent of tensor + MatrixCoord extent_; + + /// Origin + MatrixCoord origin_; + + /// Used to conditionally enable extents checking + bool divisible_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIteratorCanonical(): divisible_(true) { } + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIteratorCanonical( + TensorRef const &ref, + int lane_id + ): ref_(ref), extent_(Shape::kRow, Shape::kColumn), divisible_(true) { + + if (kOperand == Operand::kA) { + origin_ = MatrixCoord(lane_id / 4, (lane_id % 4) * kElementsPerAccess); + } + else { + origin_ = MatrixCoord((lane_id % 4) * kElementsPerAccess, lane_id / 4); + } + + ref_.add_coord_offset(origin_); + } + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIteratorCanonical( + TensorRef const &ref, + TensorCoord extent, + int lane_id + ): ref_(ref), extent_(extent), divisible_(false) { + + if (kOperand == Operand::kA) { + origin_ = MatrixCoord(lane_id / 4, (lane_id % 4) * kElementsPerAccess); + } + else { + origin_ = MatrixCoord((lane_id % 4) * kElementsPerAccess, lane_id / 4); + } + + ref_.add_coord_offset(origin_); + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIteratorCanonical &add_pointer_offset(LongIndex offset) { + + ref_.add_pointer_offset(offset); + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIteratorCanonical &add_tile_offset(TensorCoord const &tile_offset) { + + TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn); + origin_ += coord_offset; + + ref_.add_coord_offset(coord_offset); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIteratorCanonical & operator++() { + + if (kOperand == Operand::kA) { + add_tile_offset({0, 1}); + } + else { + add_tile_offset({1, 0}); + } + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIteratorCanonical & operator--() { + + if (kOperand == Operand::kA) { + add_tile_offset({0, -1}); + } + else { + add_tile_offset({-1, 0}); + } + + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIteratorCanonical & operator+=(TensorCoord const &tile_offset) { + add_tile_offset(tile_offset); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIteratorCanonical & 
operator-=(TensorCoord const &tile_offset) { + add_tile_offset(-tile_offset); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + + load_with_pointer_offset(frag, 0); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index pointer_offset) const { + + int const kWarpShapeDivisibleInner = + (kOperand == Operand::kA ? WarpShapeDivisible::kColumn : WarpShapeDivisible::kRow); + + // Take advantage of Tensor Op's 8 x 4T access pattern + int const kAccessesInner = (kWarpShapeDivisibleInner / kElementsPerAccess) / 4; + + AccessType *access_ptr = reinterpret_cast(&frag); + + if (kOperand == Operand::kA) { + int const kTilesPerInstruction = InstructionShape::kRow / 8; + + CUTLASS_PRAGMA_UNROLL + for (int inst_m_idx = 0; inst_m_idx < InstructionCount::kRow; ++inst_m_idx) { + + CUTLASS_PRAGMA_UNROLL + for (int inner_idx = 0; inner_idx < kAccessesInner; ++inner_idx) { + + CUTLASS_PRAGMA_UNROLL + for (int access_m_idx = 0; access_m_idx < kTilesPerInstruction; ++access_m_idx) { + int access_idx = + access_m_idx + kTilesPerInstruction * (inner_idx + kAccessesInner * inst_m_idx); + + MatrixCoord offset( + access_m_idx * 8 + inst_m_idx * InstructionShape::kRow, + inner_idx * 4 * kElementsPerAccess); + + MatrixCoord access_coord = origin_ + offset; + + if (divisible_ || + (access_coord.row() < extent_.row() && access_coord.column() < extent_.column())) { + + access_ptr[access_idx] = *reinterpret_cast( + ref_.data() + ref_.offset(offset)); + } + else { + AccessType zero; + zero.clear(); + access_ptr[access_idx] = zero; + } + } + } + } + } + else { + CUTLASS_PRAGMA_UNROLL + for (int inst_n_idx = 0; inst_n_idx < InstructionCount::kColumn; ++inst_n_idx) { + + CUTLASS_PRAGMA_UNROLL + for (int inner_idx = 0; inner_idx < kAccessesInner; ++inner_idx) { + int access_idx = inner_idx + kAccessesInner * inst_n_idx; + + MatrixCoord offset( + inner_idx * 4 * kElementsPerAccess, + inst_n_idx * 8); + + MatrixCoord access_coord = origin_ + offset; + + if (divisible_ || + (access_coord.row() < extent_.row() && access_coord.column() < extent_.column())) { + + access_ptr[access_idx] = *reinterpret_cast( + ref_.data() + ref_.offset(offset)); + } + else { + AccessType zero; + zero.clear(); + access_ptr[access_idx] = zero; + } + } + } + } + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index byte_offset) const { + + load_with_pointer_offset(frag, byte_offset * 8 / sizeof_bits::value); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset) const { + + TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn); + + load_with_pointer_offset(frag, ref_.offset(coord_offset)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. 
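The canonical iterator above distributes a warp's 32 lanes over an 8 x 4 arrangement: for operand A a lane's origin is (lane_id / 4, (lane_id % 4) * kElementsPerAccess), for operand B the two coordinates are swapped, and when the extent is not divisible by the tile shape, out-of-bounds accesses are zero-filled. A minimal host-side sketch of that index math and predication, assuming kElementsPerAccess == 2 (illustrative only, not taken from a particular CUTLASS configuration):

#include <cstdio>

// Illustrative constant (an assumption, not a CUTLASS definition).
constexpr int kElementsPerAccess = 2;

struct Coord { int row, col; };

// Origin of a lane's first access, mirroring the constructors above.
Coord origin_for_lane(int lane_id, bool operand_a) {
  if (operand_a) {
    return { lane_id / 4, (lane_id % 4) * kElementsPerAccess };
  }
  return { (lane_id % 4) * kElementsPerAccess, lane_id / 4 };
}

// Predicated access: mirrors the divisible_/extent_ check in load_with_pointer_offset.
float guarded_load(float const *data, int ld, Coord c, Coord extent, bool divisible) {
  if (divisible || (c.row < extent.row && c.col < extent.col)) {
    return data[c.row * ld + c.col];
  }
  return 0.0f;  // zero-fill, as in the AccessType zero path above
}

int main() {
  for (int lane = 0; lane < 32; lane += 7) {
    Coord a = origin_for_lane(lane, true);
    Coord b = origin_for_lane(lane, false);
    std::printf("lane %2d  A origin (%d,%d)  B origin (%d,%d)\n",
                lane, a.row, a.col, b.row, b.col);
  }
  return 0;
}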
+ CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + + TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn); + + load_with_pointer_offset(frag, ref_.offset(coord_offset) + pointer_offset); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index byte_offset) const { + + TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn); + + load_with_pointer_offset(frag, ref_.offset(coord_offset) + byte_offset * 8 / sizeof_bits::value); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. + CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + // no operation + } +}; + +/// Wrapper for ColumnMajor +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Identifies A or B multiplicand + Operand Operand_, + /// Data type of elements + typename Element_, + /// Shape of one matrix product operation (concept: MatrixShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions) + int OpDelta_, + /// Number of partitions along K dimension + int PartitionsK_> +class MmaTensorOpMultiplicandTileIterator< + Shape_, Operand_, Element_, + cutlass::layout::ColumnMajor, + InstructionShape_, OpDelta_, 32, PartitionsK_> { + public: + + /// Shape of tile to load (concept: PitchLinearShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand_; + + static_assert(kOperand == Operand::kA || kOperand== Operand::kB, + "MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma."); + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = cutlass::layout::ColumnMajor; + + /// Shape of one matrix product operation (concept: MatrixShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) + static int const kOpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Underlying tile iterator implementation + using Base = MmaTensorOpMultiplicandTileIteratorCanonical< + Shape, kOperand, Element, + layout::ColumnMajor, + InstructionShape, + kOpDelta, kThreads, PartitionsK_>; + + public: + + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using 
Fragment = typename Base::Fragment; + +private: + + /// Underlying tile iterator + Base iterator_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator() { } + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator( + TensorRef const &ref, + int lane_id + ): iterator_({ref.data(), ref.stride()}, lane_id) { + } + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator( + TensorRef const &ref, + TensorCoord const & extent, + int lane_id + ): iterator_({ref.data(), ref.stride()}, extent, lane_id) { + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { + + iterator_.add_pointer_offset(offset); + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { + + iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()}); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator & operator++() { + + ++iterator_; + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator & operator--() { + + --iterator_; + + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { + add_tile_offset(PitchLinearCoord(tile_offset.row(), tile_offset.column())); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { + add_tile_offset(-PitchLinearCoord(tile_offset.row(), tile_offset.column())); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + + iterator_.load(frag); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index pointer_offset) const { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index byte_offset) const { + iterator_.load_with_byte_offset(frag, byte_offset); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset) const { + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. 
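Both the canonical iterator and these layout wrappers convert offsets the same way: a whole-tile offset scales by the warp tile Shape, and a byte offset becomes an element offset via byte_offset * 8 / sizeof_bits<Element>::value. A small compile-time check of that arithmetic, assuming a hypothetical 64x64 warp tile and 16-bit elements (values chosen only to make the numbers concrete):

// Hypothetical warp tile shape and element width (assumptions for illustration).
constexpr int kShapeRow = 64;
constexpr int kShapeColumn = 64;
constexpr int kElementBits = 16;  // sizeof_bits<half>::value in CUTLASS

constexpr long tile_to_row(int tile_row)    { return long(tile_row) * kShapeRow; }
constexpr long tile_to_column(int tile_col) { return long(tile_col) * kShapeColumn; }

// Matches load_with_byte_offset: bytes -> elements.
constexpr long bytes_to_elements(long byte_offset) { return byte_offset * 8 / kElementBits; }

static_assert(tile_to_row(3) == 192, "tile offset (3, _) starts at row 192");
static_assert(tile_to_column(-1) == -64, "negative tile offsets move backwards");
static_assert(bytes_to_elements(256) == 128, "256 bytes of 16-bit elements is 128 elements");

int main() { return 0; }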
+ CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index byte_offset) const { + iterator_.load_with_byte_offset( + frag, + {tile_offset.contiguous(), tile_offset.strided()}, + byte_offset); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. + CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + iterator_.set_kgroup_index(k_group); + } +}; + + +/// Wrapper for RowMajor +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Identifies A or B multiplicand + Operand Operand_, + /// Data type of elements + typename Element_, + /// Shape of one matrix product operation (concept: MatrixShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions) + int OpDelta_, + /// Number of partitions along K dimension + int PartitionsK_> +class MmaTensorOpMultiplicandTileIterator< + Shape_, Operand_, Element_, + cutlass::layout::RowMajor, + InstructionShape_, OpDelta_, 32, PartitionsK_> { + public: + + /// Shape of tile to load (concept: PitchLinearShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand_; + + static_assert(kOperand == Operand::kA || kOperand== Operand::kB, + "MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma."); + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = cutlass::layout::RowMajor; + + /// Shape of one matrix product operation (concept: MatrixShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) + static int const kOpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Underlying tile iterator implementation + using Base = MmaTensorOpMultiplicandTileIteratorCanonical< + Shape, kOperand, Element, + layout::RowMajor, + InstructionShape, + kOpDelta, kThreads, PartitionsK_>; + + public: + + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = typename Base::Fragment; + +private: + + /// Underlying tile iterator + Base iterator_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator() { } + + /// Constructor from TensorRef + 
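These ColumnMajor and RowMajor specializations are thin facades: each holds a MmaTensorOpMultiplicandTileIteratorCanonical, re-expresses the incoming TensorRef as {data, stride}, and forwards every operation to it. A stripped-down sketch of that delegation pattern, with generic stand-in names rather than the CUTLASS types:

#include <cstdio>

// Stand-in for the canonical, layout-agnostic iterator.
struct CanonicalIter {
  long offset = 0;
  void add_pointer_offset(long n) { offset += n; }
};

// Layout-specific facade that owns the canonical iterator and forwards to it,
// mirroring how the ColumnMajor/RowMajor wrappers delegate to Base.
struct ColumnMajorIter {
  CanonicalIter iterator_;
  ColumnMajorIter &add_pointer_offset(long n) {
    iterator_.add_pointer_offset(n);
    return *this;  // returns *this for chaining, as the wrappers above do
  }
};

int main() {
  ColumnMajorIter it;
  it.add_pointer_offset(64).add_pointer_offset(8);
  std::printf("forwarded offset: %ld\n", it.iterator_.offset);
  return 0;
}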
CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator( + TensorRef const &ref, + int lane_id + ): iterator_({ref.data(), ref.stride()}, lane_id) { + } + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator( + TensorRef const &ref, + TensorCoord const &extent, + int lane_id + ): iterator_({ref.data(), ref.stride()}, extent, lane_id) { + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { + + iterator_.add_pointer_offset(offset); + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { + + iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()}); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator & operator++() { + + ++iterator_; + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator & operator--() { + + --iterator_; + + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { + add_tile_offset(PitchLinearCoord(tile_offset.row(), tile_offset.column())); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { + add_tile_offset(-PitchLinearCoord(tile_offset.row(), tile_offset.column())); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + + iterator_.load(frag); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index pointer_offset) const { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index byte_offset) const { + iterator_.load_with_byte_offset(frag, byte_offset); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset) const { + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. 
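operator++ on these iterators always advances along the K dimension of the GEMM, which is the column dimension of operand A and the row dimension of operand B; operator-- undoes one such step. A tiny trace of that rule with the tile coordinate tracked explicitly, following the add_tile_offset calls shown above:

#include <cassert>

struct TileCoord { int row = 0, col = 0; };

// Mirrors operator++: A advances one column of tiles, B advances one row of tiles.
TileCoord advance(TileCoord c, bool operand_a) {
  if (operand_a) { c.col += 1; } else { c.row += 1; }
  return c;
}

// Mirrors operator--.
TileCoord retreat(TileCoord c, bool operand_a) {
  if (operand_a) { c.col -= 1; } else { c.row -= 1; }
  return c;
}

int main() {
  TileCoord a, b;
  for (int k = 0; k < 3; ++k) { a = advance(a, true); b = advance(b, false); }
  assert(a.col == 3 && a.row == 0);  // A walked three K tiles along columns
  assert(b.row == 3 && b.col == 0);  // B walked three K tiles along rows
  a = retreat(a, true);
  assert(a.col == 2);
  return 0;
}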
+ CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index byte_offset) const { + iterator_.load_with_byte_offset( + frag, + {tile_offset.contiguous(), tile_offset.strided()}, + byte_offset); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. + CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + iterator_.set_kgroup_index(k_group); + } +}; + + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace warp +} // namespace gemm +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_tile_iterator_sparse.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_tile_iterator_sparse.h new file mode 100644 index 0000000000000000000000000000000000000000..f7370a648dfb939fe0c5e19ba661f8c01c5aeadb --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_tile_iterator_sparse.h @@ -0,0 +1,380 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Defines iterators to load sparse meta data used by warp-level matrix multiply operations + targeting Sparse Tensor Cores. +*/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/array.h" +#include "cutlass/numeric_types.h" +#include "cutlass/tensor_ref.h" +#include "cutlass/matrix_shape.h" + +#include "cutlass/arch/memory_sm75.h" +#include "cutlass/gemm/gemm.h" + +#include "cutlass/layout/matrix.h" +#include "cutlass/layout/tensor.h" +#include "cutlass/layout/pitch_linear.h" +#include "cutlass/layout/tensor_op_multiplicand_sm75.h" + +#include "cutlass/platform/platform.h" +#include "cutlass/fast_math.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace warp { + +//////////////////////////////////////////////////////////////////////////////// + +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Data type of A elements + typename Element_, + /// Layout of operand + typename Layout_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Delta between *MMA operations (in units of *MMA operations, concept: + /// MatrixShape) + int OpDelta_, + /// Number of threads participating in one matrix operation + int Threads, + /// Number of partitions along K dimension + int PartitionsK_ = 1> +class SparseMmaTensorOpMetaTileIterator { + public: + /// Shape of tile to load (concept: PitchLinearShape) + using Shape = Shape_; + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = Layout_; + + /// Shape of one matrix product operation (concept: GemmShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: + /// MatrixShape) + static int const kOpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// Number of partitions along K dimension + static int const kPartitionsK = PartitionsK_; + + static int const kSparse = 2; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Internal structure of iterator - made public to enable introspection + struct Policy { + static_assert( + !(Shape::kColumn % InstructionShape::kColumn), + "Shape of warp-level Mma must be divisible by operator shape."); + + static int const kElementsPerAccess = 128 / sizeof_bits::value; + + // Determine number of elements along outer dimension per individual LDSM op + static int const kLdsmOpOuter = InstructionShape::kColumn; + static int const kLdsmOpInner = 8 * kElementsPerAccess / kLdsmOpOuter; + + static_assert(!(Shape::kColumn % kLdsmOpOuter), + "Shape of warp-level mma must be divisible by LDSM's " + "fundamental tile size."); + + static_assert(!(Shape::kRow % kLdsmOpInner), + "Shape of warp-level mma must be divisible by LDSM's " + "fundamental tile size."); + + /// Shape of one individual LDSM instruction + static int const LdsmShapeColumn = + InstructionShape::kColumn / kLdsmOpOuter; + static int const LdsmShapeRow = + ((4 / LdsmShapeColumn * kLdsmOpInner) > Shape::kRow) + ? 
(Shape::kRow / kLdsmOpInner) + : (4 / LdsmShapeColumn); + using LdsmShape = + layout::PitchLinearShape; + + /// Number and arrangement of LDSM instructions + using LdsmIterations = layout::PitchLinearShape< + Shape::kRow / kLdsmOpInner / LdsmShapeRow, + 1>; + + /// Number of groups for each tile + static int const kGroupsPerTile = + Shape::kColumn / InstructionShape::kColumn; + }; + + private: + /// Not working on this feature at the moment. + static_assert(kOpDelta == 1, + "Alternative arrangements not supported at present."); + + /// Pointer type used for accesses + using AccessType = Array; + + public: + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = + Array; + + private: + + /// Layout object storing stride values + Index stride_; + + /// Shared memory base pointers - not advanced + AccessType const *pointer_; + + /// Byte offset incremented as iterator advances + Index byte_offset_; + + /// Internal counter used to determine when to increment byte offset and when + /// to XOR it + int k_group_idx_; + + public: + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + SparseMmaTensorOpMetaTileIterator() + : pointer_(nullptr), + stride_(0), + byte_offset_(0), + k_group_idx_(0) {} + + /// Constructor from TensorRef + CUTLASS_DEVICE + SparseMmaTensorOpMetaTileIterator(TensorRef const &ref, int lane_id) + : pointer_(reinterpret_cast(ref.data())), + stride_(ref.stride(0) / Policy::kElementsPerAccess), + byte_offset_(0), + k_group_idx_(0) { + + int access_contiguous = (lane_id % (Shape::kRow / Policy::kElementsPerAccess)); + int access_strided = (lane_id / (Shape::kRow / Policy::kElementsPerAccess)); + + byte_offset_ = (access_contiguous + access_strided * stride_) * + sizeof_bits::value * Policy::kElementsPerAccess / 8; + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_DEVICE + SparseMmaTensorOpMetaTileIterator &add_pointer_offset(LongIndex offset) { + byte_offset_ += offset * sizeof_bits::value / 8; + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole + /// tiles + CUTLASS_DEVICE + SparseMmaTensorOpMetaTileIterator &add_tile_offset( + TensorCoord const &tile_offset) { + int offset = tile_offset.row() * Shape::kRow + + tile_offset.column() * InstructionShape::kColumn * stride_ * + Policy::kElementsPerAccess; + + add_pointer_offset(offset); + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_DEVICE + SparseMmaTensorOpMetaTileIterator &operator++() { + add_tile_offset({0, 1}); + + if (kPartitionsK > 1) { + ++k_group_idx_; + // Jump to next stage + if (k_group_idx_ == Policy::kGroupsPerTile) { + k_group_idx_ = 0; + add_tile_offset( + {0, ((kPartitionsK - 1) * Policy::kGroupsPerTile)}); + } + } + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + SparseMmaTensorOpMetaTileIterator &operator--(){ + byte_offset_ -= stride_ * InstructionShape::kColumn * + sizeof_bits::value * Policy::kElementsPerAccess / + 8; + } + + ///< advances in units of whole tiles along the logical coordinate space of + ///< the tensor + CUTLASS_DEVICE SparseMmaTensorOpMetaTileIterator & + operator+=(TensorCoord const &tile_offset) { + add_tile_offset(tile_offset); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of + ///< the tensor + CUTLASS_DEVICE + SparseMmaTensorOpMetaTileIterator &operator-=( + TensorCoord const 
&tile_offset) { + add_tile_offset(-tile_offset); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { load_with_byte_offset(frag, 0); } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset in units of bytes + Index byte_offset) const { + Array *fetch_ptr = + reinterpret_cast *>(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < Policy::LdsmIterations::kStrided; ++s) { + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < Policy::LdsmIterations::kContiguous; ++c) { + + int access_idx = c + s * Policy::LdsmIterations::kContiguous; + + AccessType const *source_ptr = + pointer_ + + Policy::LdsmShape::kContiguous * Policy::kLdsmOpInner * c + + Policy::LdsmShape::kStrided * s * stride_; + + char const *source_byte_ptr = reinterpret_cast(source_ptr) + + byte_offset + byte_offset_; + + cutlass::arch::ldsm( + fetch_ptr[access_idx], source_byte_ptr); + } + } + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index pointer_offset) const { + load_with_byte_offset(frag, pointer_offset * sizeof(Element)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset) const { + load_with_byte_offset(frag, tile_offset, 0); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index byte_offset) const { + Index pointer_offset = + tile_offset.contiguous() * Shape::kRow / Layout::kElementsPerAccess + + tile_offset.strided() * InstructionShape::kColumn * stride_; + + byte_offset += sizeof(AccessType) * pointer_offset; + + load_with_byte_offset(frag, byte_offset); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. 
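In the sparse metadata iterator, each lane's starting byte offset comes from splitting lane_id into a contiguous and a strided index over Shape::kRow / kElementsPerAccess, then scaling by the 128-bit access width. A worked host-side version of that computation, assuming 16-bit metadata elements (kElementsPerAccess = 128 / 16 = 8) and a hypothetical Shape::kRow of 64; the numbers are illustrative, not a specific CUTLASS configuration:

#include <cstdio>

// Assumed illustration values.
constexpr int kElementBits       = 16;                  // sizeof_bits of the meta element
constexpr int kElementsPerAccess = 128 / kElementBits;  // = 8, one 128-bit access
constexpr int kShapeRow          = 64;

long initial_byte_offset(int lane_id, long stride_in_accesses) {
  int lanes_per_row     = kShapeRow / kElementsPerAccess;  // 8 accesses span one row
  int access_contiguous = lane_id % lanes_per_row;
  int access_strided    = lane_id / lanes_per_row;
  // Same formula as the constructor above: accesses -> elements -> bytes.
  return (access_contiguous + access_strided * stride_in_accesses)
         * long(kElementBits) * kElementsPerAccess / 8;
}

int main() {
  for (int lane : {0, 1, 8, 31}) {
    std::printf("lane %2d -> byte offset %ld\n", lane, initial_byte_offset(lane, /*stride=*/16));
  }
  return 0;
}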
+ CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + // no op + } +}; + +} // namespace warp +} // namespace gemm +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_tile_iterator_wmma.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_tile_iterator_wmma.h new file mode 100644 index 0000000000000000000000000000000000000000..d841d2bcca38ae10f45e1874cb4002a2ae543ee6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_tile_iterator_wmma.h @@ -0,0 +1,805 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Defines iterators used by warp-level matrix multiply operations targeting Tensor Cores. 
+*/ + +#pragma once + + +#include "cutlass/cutlass.h" +#include "cutlass/arch/wmma.h" + +#if defined(CUTLASS_ARCH_WMMA_ENABLED) + +#include "cutlass/wmma_array.h" +#include "cutlass/numeric_types.h" +#include "cutlass/tensor_ref.h" +#include "cutlass/matrix_shape.h" + +#include "cutlass/arch/memory_sm75.h" +#include "cutlass/gemm/gemm.h" + +#include "cutlass/layout/matrix.h" +#include "cutlass/layout/tensor.h" +#include "cutlass/layout/pitch_linear.h" +#include "cutlass/layout/tensor_op_multiplicand_sm75.h" + +#include "cutlass/platform/platform.h" +#include "cutlass/fast_math.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace warp { + +//////////////////////////////////////////////////////////////////////////////// +template < + ///< Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Operand identity (A or B) + Operand Operand, + /// Data type of operand + typename Element_, + /// Layout of operand + typename Layout_, + /// Delta between *MMA operations (in units of *WMMA operations, concept:MatrixShape) + int OpDelta_, + /// Number of threads participating in one matrix operation + int Threads, + /// Shape of the warp in units of thread (concept: MmaTensorOpPolicy) + typename Policy_> +class MmaTensorOpWmmaMultiplicandTileIterator; + + +//////////////////////////////////////////////////////////////////////////////// +/// This tile iterator is specialized for 32-thread WMMA operation. +/// It uses nvcuda::wmma::load_matrix_sync to load from shared +/// memory and therefore must be initialized with a TensorRef to shared memory. +/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept +//////////////////////////////////////////////////////////////////////////////// +template < + ///< Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Data type of elements + typename Element_, + /// Layout of operand + typename Layout_, + /// Interval between adjacent *WMMA instructions (in units of WMMA instructions) + int OpDelta_, + /// Shape of the warp in units of thread (concept: MmaTensorOpPolicy) + typename Policy_> +class MmaTensorOpWmmaMultiplicandTileIterator< + Shape_, Operand::kA, Element_, Layout_, + OpDelta_, 32, Policy_> { + public: + + /// Shape of tile to load (concept: MatrixShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand::kA; + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = Layout_; + + /// Delta between *WMMA operations + static int const kOpDelta = OpDelta_; + + /// Wmma Operator information and operation delta + using Policy = Policy_; + + + // + // Derived quantities + // + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Stride Index type + using StrideIndex = typename TensorRef::Layout::Stride::Index; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Native Wmma shape for operand A (concept MatrixShape) + using WmmaShape = MatrixShape< + Policy::Operator::Shape::kM, + Policy::Operator::Shape::kK + >; + + /// Map cutlass dataype to nvcuda::wmma datatype + using WmmaDataType = typename cutlass::arch::CutlassToWmmaDataType::Type; + + /// Shape of individual WMMA load / stores for operand A + using 
Iterations = MatrixShape< + Shape::kRow / WmmaShape::kRow, + 1 + >; + + /// Fragment object holding a warps part + using Fragment = WmmaFragmentArray; + + + ////////////////////////////////////////////////////////////////////////////////////////////////////// + /// statically assert this specialization + ///////////////////////////////////////////////////////////////////////////////////////////////////// + /// This iterator is specalized for Operand A + static_assert(kOperand == Operand::kA, + "MmaTensorOpWmmaMultiplicandTileIterator may only be instantiated for A operands to warp-level Mma."); + + /// Supported memory layouts + static_assert( + platform::is_same::value || + platform::is_same::value, + "Supported list of memory layouts for WMMA are: RowMajor, ColumnMajor"); + + /// Not working on this feature at the moment. + static_assert(kOpDelta == 1, + "Alternative arrangements not supported at present."); + + ///////////////////////////////////////////////////////////////////////////////////////////////////// + +private: + + /// Shared memory base pointers - not advanced + char const *pointer_; + + /// Byte offset into shared memory - advanced + Index byte_offset_; + + /// Stride in units of number of elements + StrideIndex stride_; + + /// Layout of shared memory + Layout layout_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaTensorOpWmmaMultiplicandTileIterator() { } + + /// Constructor from TensorRef + CUTLASS_DEVICE + MmaTensorOpWmmaMultiplicandTileIterator( + TensorRef const &ref, + int lane_id + ): pointer_(reinterpret_cast(ref.data())), byte_offset_(0), stride_(ref.stride(0)), layout_(ref.stride(0)) { + + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_DEVICE + MmaTensorOpWmmaMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { + byte_offset_ += (offset * sizeof_bits::value) / 8; + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaTensorOpWmmaMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { + + Index elements_offset = layout_({tile_offset.row() * Shape::kRow, tile_offset.column() * WmmaShape::kColumn}); + + byte_offset_ += (elements_offset * sizeof_bits::value) / 8; + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_DEVICE + MmaTensorOpWmmaMultiplicandTileIterator & operator++() { + + Index elements_offset = layout_({0, WmmaShape::kColumn}); + + byte_offset_ += (elements_offset * sizeof_bits::value) / 8; + + return *this; + } + + /// Advances the iterator along the opposite of the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpWmmaMultiplicandTileIterator & operator--() { + + Index elements_offset = layout_({0, WmmaShape::kColumn}); + + byte_offset_ -= (elements_offset * sizeof_bits::value) / 8; + + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpWmmaMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { + add_tile_offset(tile_offset); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpWmmaMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { + add_tile_offset(-tile_offset); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. 
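The load path that follows ultimately calls nvcuda::wmma::load_matrix_sync on each native WMMA tile. For readers unfamiliar with that API, here is a free-standing CUDA kernel using the same intrinsics on a single 16x16x16 fragment; this is standard CUDA WMMA usage (requires sm_70 or newer), independent of the CUTLASS iterator machinery:

#include <mma.h>
#include <cuda_fp16.h>

using namespace nvcuda;

// One warp computes C = A * B + C for a single 16x16x16 tile.
__global__ void wmma_single_tile(half const *A, half const *B, float *C,
                                 int lda, int ldb, int ldc) {
  wmma::fragment<wmma::matrix_a, 16, 16, 16, half, wmma::row_major> a_frag;
  wmma::fragment<wmma::matrix_b, 16, 16, 16, half, wmma::col_major> b_frag;
  wmma::fragment<wmma::accumulator, 16, 16, 16, float> c_frag;

  wmma::fill_fragment(c_frag, 0.0f);
  wmma::load_matrix_sync(a_frag, A, lda);   // what the A-operand iterator's load wraps
  wmma::load_matrix_sync(b_frag, B, ldb);   // what the B-operand iterator's load wraps
  wmma::mma_sync(c_frag, a_frag, b_frag, c_frag);
  wmma::store_matrix_sync(C, c_frag, ldc, wmma::mem_row_major);  // accumulator store
}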
+ CUTLASS_HOST_DEVICE + void load_with_byte_offset(Fragment &frag, Index byte_offset) const { + + CUTLASS_PRAGMA_UNROLL + for (int k = 0; k < Iterations::kColumn; ++k) { + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < Iterations::kRow; ++m) { + + Index load_byte_offset = layout_({m * WmmaShape::kRow, k * WmmaShape::kColumn}) * sizeof_bits::value / 8; + + const WmmaDataType *ptr = reinterpret_cast(pointer_ + byte_offset_ + load_byte_offset + byte_offset); + + nvcuda::wmma::load_matrix_sync(frag[m], ptr, stride_); + + } + } + } + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + load_with_byte_offset(frag, 0); + } + + /// Stores a fragment to memory at the location pointed to by the iterator + CUTLASS_HOST_DEVICE + void store_with_byte_offset(Fragment const &frag, Index byte_offset) const { + + CUTLASS_PRAGMA_UNROLL + for (int k = 0; k < Iterations::kColumn; ++k) { + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < Iterations::kRow; ++m) { + + Index store_byte_offset = layout_({m * WmmaShape::kRow, k * WmmaShape::kColumn}) * sizeof_bits::value / 8; + + WmmaDataType *ptr = reinterpret_cast(pointer_ + byte_offset_ + store_byte_offset + byte_offset); + + nvcuda::wmma::store_matrix_sync(ptr, frag[m], stride_); + + } + } + } + + /// Stores a fragment to memory at the location pointed to by the iterator + CUTLASS_HOST_DEVICE + void store(Fragment const &frag) const { + store_with_byte_offset(frag, 0); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. + CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + // no operation here + } +}; + + +//////////////////////////////////////////////////////////////////////////////// +/// This tile iterator is specialized for 32-thread WMMA operation. +/// It uses nvcuda::wmma::load_matrix_sync to load from shared +/// memory and therefore must be initialized with a TensorRef to shared memory. 
+/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept +/// +//////////////////////////////////////////////////////////////////////////////// + +template < + ///< Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Data type of elements + typename Element_, + /// Layout of operand + typename Layout_, + /// Interval between adjacent *WMMA instructions (in units of WMMA instructions) + int OpDelta_, + /// Shape of the warp in units of thread (concept: MmaTensorOpPolicy) + typename Policy_> +class MmaTensorOpWmmaMultiplicandTileIterator< + Shape_, Operand::kB, Element_, Layout_, + OpDelta_, 32, Policy_> { + public: + + /// Shape of tile to load (concept: MatrixShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand::kB; + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = Layout_; + + /// Delta between *WMMA operations + static int const kOpDelta = OpDelta_; + + /// Wmma Operator information and operation delta + using Policy = Policy_; + + + // + // Derived quantities + // + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Stride Index type + using StrideIndex = typename TensorRef::Layout::Stride::Index; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Native Wmma shape (concept MatrixShape) + using WmmaShape = MatrixShape< + Policy::Operator::Shape::kK, + Policy::Operator::Shape::kN + >; + + /// Map cutlass dataype to nvcuda::wmma datatype + using WmmaDataType = typename cutlass::arch::CutlassToWmmaDataType::Type; + + /// Shape of individual WMMA load / stores for operand B + using Iterations = MatrixShape< + 1, + Shape::kColumn / WmmaShape::kColumn + >; + + /// Fragment object holding a warps part + using Fragment = WmmaFragmentArray; + + + ////////////////////////////////////////////////////////////////////////////////////////////////////// + /// statically asserts this specialization + ///////////////////////////////////////////////////////////////////////////////////////////////////// + /// This iterator is specalized for Operand B + static_assert(kOperand == Operand::kB, + "MmaTensorOpWmmaMultiplicandTileIterator may only be instantiated for B operands to warp-level Mma."); + + /// Supported memory layouts + static_assert( + platform::is_same::value || + platform::is_same::value, + "Supported list of memory layouts for WMMA are: RowMajor, ColumnMajor"); + + /// Not working on this feature at the moment. 
+ static_assert(kOpDelta == 1, + "Alternative arrangements not supported at present."); + + ///////////////////////////////////////////////////////////////////////////////////////////////////// + +private: + + /// Shared memory base pointers - not advanced + char const *pointer_; + + /// Byte offset into shared memory - advanced + Index byte_offset_; + + /// Stride in units of number of elements + StrideIndex stride_; + + /// Layout of shared memory + Layout layout_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaTensorOpWmmaMultiplicandTileIterator() { } + + /// Constructor from TensorRef + CUTLASS_DEVICE + MmaTensorOpWmmaMultiplicandTileIterator( + TensorRef const &ref, + int lane_id + ): pointer_(reinterpret_cast(ref.data())), byte_offset_(0), stride_(ref.stride(0)), layout_(ref.stride(0)) { + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_DEVICE + MmaTensorOpWmmaMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { + + byte_offset_ += (offset * sizeof_bits::value) / 8; + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaTensorOpWmmaMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { + + Index elements_offset = layout_({tile_offset.row() * WmmaShape::kRow, tile_offset.column() * Shape::kColumn}); + + byte_offset_ += (elements_offset * sizeof_bits::value) / 8; + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_DEVICE + MmaTensorOpWmmaMultiplicandTileIterator & operator++() { + + Index elements_offset = layout_({WmmaShape::kRow, 0}); + + byte_offset_ += (elements_offset * sizeof_bits::value) / 8; + + return *this; + } + + /// Advances the iterator along the opposite of the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpWmmaMultiplicandTileIterator & operator--() { + + Index elements_offset = layout_({WmmaShape::kRow, 0}); + + byte_offset_ -= (elements_offset * sizeof_bits::value) / 8; + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpWmmaMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { + add_tile_offset(tile_offset); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpWmmaMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { + add_tile_offset(-tile_offset); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load_with_byte_offset(Fragment &frag, Index byte_offset) const { + + CUTLASS_PRAGMA_UNROLL + for (int k = 0; k < Iterations::kRow; ++k) { + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < Iterations::kColumn; ++n) { + + Index load_byte_offset = layout_({k * WmmaShape::kRow, n * WmmaShape::kColumn}) * sizeof_bits::value / 8; + + const WmmaDataType *ptr = reinterpret_cast(pointer_ + byte_offset_ + load_byte_offset + byte_offset); + + nvcuda::wmma::load_matrix_sync(frag[n], ptr, stride_); + } + } + } + /// Loads a fragment from memory at the location pointed to by the iterator. 
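For operand B the iterator covers the warp tile with a single row of WMMA tiles: Iterations = <1, Shape::kColumn / WmmaShape::kColumn>, where WmmaShape is K x N taken from Policy::Operator::Shape. A compile-time check of that count, assuming a hypothetical 16x64 B warp tile and a 16x16x16 WMMA instruction:

// Assumed shapes for illustration (GemmShape<16,16,16> instruction, 16x64 B tile).
constexpr int kWarpK = 16, kWarpN = 64;
constexpr int kWmmaK = 16, kWmmaN = 16;

constexpr int kIterationsRow    = 1;                 // one K step per WMMA instruction
constexpr int kIterationsColumn = kWarpN / kWmmaN;   // tiles needed to span N

static_assert(kWarpK == kWmmaK, "the B tile's K extent matches one instruction");
static_assert(kIterationsColumn == 4, "a 64-wide tile takes four 16-wide WMMA loads");

int main() { return 0; }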
+ CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + load_with_byte_offset(frag, 0); + } + + /// Stores a fragment to memory at the location pointed to by the iterator + CUTLASS_HOST_DEVICE + void store_with_byte_offset(Fragment const &frag, Index byte_offset) const { + + CUTLASS_PRAGMA_UNROLL + for (int k = 0; k < Iterations::kRow; ++k) { + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < Iterations::kColumn; ++n) { + + Index store_byte_offset = layout_({k * WmmaShape::kRow, n * WmmaShape::kColumn}) * sizeof_bits::value / 8; + + WmmaDataType *ptr = reinterpret_cast(pointer_ + byte_offset_ + store_byte_offset + byte_offset); + + nvcuda::wmma::store_matrix_sync(ptr, frag[n], stride_); + } + } + } + + /// Stores a fragment to memory at the location pointed to by the iterator + CUTLASS_HOST_DEVICE + void store(Fragment const &frag) const { + store_with_byte_offset(frag, 0); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. + CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + // no operation here + } +}; + +//////////////////////////////////////////////////////////////////////////////// +template < + ///< Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Element type + typename Element_, + /// Layout of operand in memory + typename Layout_, + /// Interval between adjacent *WMMA instructions (in units of WMMA instructions, concept: MatrixShape) + typename OpDelta_, + /// Shape of the warp in units of thread (concept: MmaTensorOpPolicy) + typename Policy_> +class MmaTensorOpWmmaAccumulatorTileIterator; + +//////////////////////////////////////////////////////////////////////////////// +/// This tile iterator is specialized for 32-thread WMMA operation. +/// It uses nvcuda::wmma::store_matrix_sync to load from shared +/// memory and therefore must be initialized with a TensorRef to shared memory. 
+/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept | +/// WriteableRandomAccessContiguousTileIteratorConcept +/// +//////////////////////////////////////////////////////////////////////////////// + +template < + ///< Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Data type of elements + typename Element_, + /// Layout of operand in memory + typename Layout_, + /// Interval between adjacent *WMMA instructions (in units of WMMA instructions) + typename OpDelta_, + /// Shape of the warp in units of thread (concept: MmaTensorOpPolicy) + typename Policy_> +class MmaTensorOpWmmaAccumulatorTileIterator +{ + public: + + /// Shape of tile to load (concept: MatrixShape) + using Shape = Shape_; + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = Layout_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) + using OpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// Wmma Operator information and operation delta + using Policy = Policy_; + + + // + // Derived quantities + // + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Native Wmma shape (concept MatrixShape) + using WmmaShape = MatrixShape< + Policy::Operator::Shape::kM, + Policy::Operator::Shape::kN + >; + + /// Map cutlass dataype to nvcuda::wmma datatype + using WmmaDataType = typename cutlass::arch::CutlassToWmmaDataType::Type; + + /// Map cutlass::layout to nvuda::wmma::layout_t enum + static nvcuda::wmma::layout_t const WmmaLayout = cutlass::arch::CutlassToWmmaLayout::value; + + /// Shape of individual WMMA load / stores for accumulator + using Iterations = MatrixShape< + Shape::kRow / WmmaShape::kRow, + Shape::kColumn / WmmaShape::kColumn + >; + + /// Fragment object holding a thread's part of a tile + using Fragment = WmmaFragmentArray; + + ////////////////////////////////////////////////////////////////////////////////////////////////////// + /// statically asserts this specialization + ///////////////////////////////////////////////////////////////////////////////////////////////////// + /// Supported layouts + static_assert( + platform::is_same::value || + platform::is_same::value, + "Supported list of memory layouts for WMMA are: RowMajor, ColumnMajor"); + +private: + + /// Internal reference + cutlass::TensorRef ref_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaTensorOpWmmaAccumulatorTileIterator() { } + + /// Constructor from TensorRef + CUTLASS_DEVICE + MmaTensorOpWmmaAccumulatorTileIterator( + TensorRef const &ref, + int lane_id + ): ref_(ref) { } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_DEVICE + MmaTensorOpWmmaAccumulatorTileIterator &add_pointer_offset(LongIndex offset) { + ref_.add_pointer_offset(offset); + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaTensorOpWmmaAccumulatorTileIterator &add_tile_offset(TensorCoord const &tile_offset) { + ref_.add_coord_offset({tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn}); + return *this; + } + + /// Advances the iterator along the 
advance dimension + CUTLASS_DEVICE + MmaTensorOpWmmaAccumulatorTileIterator & operator++() { + ref_.add_coord_offset({Shape::kRow, 0}); + return *this; + } + + /// Advances the iterator along the opposite of the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpWmmaAccumulatorTileIterator & operator--() { + ref_.add_coord_offset({-Shape::kRow, 0}); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpWmmaAccumulatorTileIterator & operator+=(TensorCoord const &tile_offset) { + add_tile_offset(tile_offset); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpWmmaAccumulatorTileIterator & operator-=(TensorCoord const &tile_offset) { + add_tile_offset(-tile_offset); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const { + + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < Iterations::kRow; ++m) { + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < Iterations::kColumn; ++n) { + + const WmmaDataType * ptr = reinterpret_cast (ref_.data() + ref_.offset({m * WmmaShape::kRow, n * WmmaShape::kColumn}) + pointer_offset); + + nvcuda::wmma::load_matrix_sync(frag[m * Iterations::kColumn + n], ptr, ref_.stride()[0], WmmaLayout); + + } + } + } + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + load_with_pointer_offset(frag, 0); + } + + /// Stores a fragment to memory at the location pointed to by the iterator + CUTLASS_HOST_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) const { + + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < Iterations::kRow; ++m) { + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < Iterations::kColumn; ++n) { + + WmmaDataType * ptr = reinterpret_cast (ref_.data() + ref_.offset({m * WmmaShape::kRow, n * WmmaShape::kColumn}) + pointer_offset); + + nvcuda::wmma::store_matrix_sync(ptr, frag[m * Iterations::kColumn + n], ref_.stride()[0], WmmaLayout); + } + } + } + + /// Stores a fragment to memory at the location pointed to by the iterator + CUTLASS_HOST_DEVICE + void store(Fragment const &frag) const { + store_with_pointer_offset(frag, 0); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. 
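The accumulator iterator walks an Iterations::kRow x Iterations::kColumn grid of WMMA tiles and flattens the fragment index row-major as m * Iterations::kColumn + n, offsetting into the tensor by whole WmmaShape tiles. A short sketch of that traversal for an assumed 64x64 accumulator tile and a 16x16 WMMA accumulator shape:

#include <cstdio>

// Assumed shapes for illustration only.
constexpr int kShapeRow = 64, kShapeColumn = 64;
constexpr int kWmmaRow  = 16, kWmmaColumn  = 16;

constexpr int kIterRow = kShapeRow / kWmmaRow;        // 4
constexpr int kIterCol = kShapeColumn / kWmmaColumn;  // 4

int main() {
  // Same nesting and flattening as load_with_pointer_offset / store_with_pointer_offset above.
  for (int m = 0; m < kIterRow; ++m) {
    for (int n = 0; n < kIterCol; ++n) {
      int frag_idx = m * kIterCol + n;                 // frag[m * Iterations::kColumn + n]
      int row = m * kWmmaRow, col = n * kWmmaColumn;   // ref_.offset({row, col})
      std::printf("fragment %2d covers rows %2d..%2d, cols %2d..%2d\n",
                  frag_idx, row, row + kWmmaRow - 1, col, col + kWmmaColumn - 1);
    }
  }
  return 0;
}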
+ CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + // no operation here + } +}; + + + +} // namespace warp +} // namespace gemm +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// + +#endif // if defined(CUTLASS_ARCH_WMMA_ENABLED) + + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_wmma.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_wmma.h new file mode 100644 index 0000000000000000000000000000000000000000..c3954f3479c42d0a89f29814a40e207d44d1c2d6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_wmma.h @@ -0,0 +1,223 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Templates implementing warp-level matrix multiply-accumulate operations targeting + Tensor Cores. 
+*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/arch/wmma.h" + +#if defined(CUTLASS_ARCH_WMMA_ENABLED) + +#include "cutlass/wmma_array.h" +#include "cutlass/numeric_types.h" +#include "cutlass/matrix_shape.h" + +#include "cutlass/arch/memory_sm75.h" +#include "cutlass/arch/mma_sm75.h" +#include "cutlass/arch/mma_sm80.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/warp/mma.h" + +#include "cutlass/gemm/warp/mma_tensor_op_policy.h" + +#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_wmma.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace warp { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +///< Structure to compute the matrix product targeting CUDA cores via WMMA. +template < + ///< Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + ///< Data type of A elements + typename ElementA_, + ///< Layout of A matrix (concept: MatrixLayout) + typename LayoutA_, + ///< Data type of B elements + typename ElementB_, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB_, + ///< Element type of C matrix + typename ElementC_, + ///< Layout of C matrix (concept: MatrixLayout) + typename LayoutC_, + ///< Policy describing warp-level Wmma operation (concept: MmaTensorOpPolicy) + typename Policy_, + ///< Number of partitions along K dimension + int PartitionsK_ = 1, + ///< Used for partial specialization + typename Enable = bool +> +class MmaTensorOpWmma { +public: + ///< Shape of warp-level matrix operation (concept: GemmShape) + using Shape = Shape_; + + ///< Data type of multiplicand A + using ElementA = ElementA_; + + ///< Layout of multiplicand A + using LayoutA = LayoutA_; + + ///< Data type of multiplicand B + using ElementB = ElementB_; + + ///< Layout of multiplicand B + using LayoutB = LayoutB_; + + ///< Data type of accumulator matrix C + using ElementC = ElementC_; + + ///< Layout of accumulator matrix C + using LayoutC = LayoutC_; + + /// Shape of the warp in units of thread (concept: MmaTensorOpPolicy) + using Policy = Policy_; + + /// Underlying instruction shape + using InstructionShape = typename Policy::Operator::Shape; + + /// Underlying matrix multiply operator (concept: arch::Mma) + using ArchMmaOperator = typename Policy::Operator; + + /// Indicates math operator + using MathOperator = typename ArchMmaOperator::Operator; + + /// Underlying architecture tag + using ArchTag = typename Policy::Operator::ArchTag; + + /// Complex transform on A operand + static ComplexTransform const kTransformA = ComplexTransform::kNone; + + /// Complex transform on B operand + static ComplexTransform const kTransformB = ComplexTransform::kNone; + + /// Indicates class of matrix operator + using OperatorClass = arch::OpClassWmmaTensorOp; + + /// Number of threads participating in warp-level matrix product + static int const kThreadCount = 32; + + /// Number of partitions along K dimension + static int const kPartitionsK = PartitionsK_; + +public: + + /// Iterates over the A operand in memory + using IteratorA = MmaTensorOpWmmaMultiplicandTileIterator< + MatrixShape, Operand::kA, ElementA, LayoutA, + Policy::OpDelta::kRow, kThreadCount, Policy>; + + /// Storage for A tile + using FragmentA = typename IteratorA::Fragment; + + /// Iterates over the B operand in memory + using IteratorB = MmaTensorOpWmmaMultiplicandTileIterator< + MatrixShape, Operand::kB, ElementB, 
LayoutB, + Policy::OpDelta::kRow, kThreadCount, Policy>; + + /// Storage for B tile + using FragmentB = typename IteratorB::Fragment; + + /// Iterates over the C operand in memory + using IteratorC = MmaTensorOpWmmaAccumulatorTileIterator< + MatrixShape, ElementC, LayoutC, + typename Policy::OpDelta, Policy>; + + /// Storage for C tile + using FragmentC = typename IteratorC::Fragment; + +private: + + static_assert( + !(Shape::kM % Policy::Operator::Shape::kM) && + !(Shape::kN % Policy::Operator::Shape::kN), + "Shape of warp-level Wmma must be divisible by operator shape (wmma native size)"); + + /// Number of wmma operations performed + using WmmaIterations = MatrixShape< + Shape::kM / Policy::Operator::Shape::kM, + Shape::kN / Policy::Operator::Shape::kN + >; + +public: + + /// Underlying matrix multiply operator (concept: cutlass::arch::Wmma) + typename Policy::Operator wmma; + +public: + + // + // Methods + // + + /// Ctor + CUTLASS_DEVICE + MmaTensorOpWmma() {} + + /// Performs a warp-level matrix multiply-accumulate operation + CUTLASS_DEVICE + void operator()( + FragmentC &D, + FragmentA const &A, + FragmentB const &B, + FragmentC const &C) const { + + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < WmmaIterations::kColumn; ++n) { + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < WmmaIterations::kRow; ++m) { + + // accumulate wmma mma + wmma(D[m * WmmaIterations::kColumn + n], A[m], B[n], C[m * WmmaIterations::kColumn + n]); + } + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace warp +} // namespace gemm +} // namespace cutlass + +#endif // if defined(CUTLASS_ARCH_WMMA_ENABLED) + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_with_reduction_tensor_op.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_with_reduction_tensor_op.h new file mode 100644 index 0000000000000000000000000000000000000000..995796796dcb7150026bb7428cd7af457d6d7a5f --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/mma_with_reduction_tensor_op.h @@ -0,0 +1,449 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Templates implementing warp-level matrix multiply-accumulate operations targeting + Tensor Cores. +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/array.h" +#include "cutlass/platform/platform.h" + +#include "cutlass/numeric_conversion.h" +#include "cutlass/numeric_types.h" +#include "cutlass/matrix_shape.h" + +#include "cutlass/arch/memory_sm75.h" +#include "cutlass/arch/mma_sm75.h" +#include "cutlass/arch/mma_sm80.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/warp/mma.h" + +#include "cutlass/gemm/warp/mma_tensor_op_policy.h" +#include "cutlass/gemm/warp/mma_tensor_op.h" +#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h" +#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace warp { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions. +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + /// Data type of A elements + typename ElementA_, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA_, + /// Data type of B elements + typename ElementB_, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB_, + /// Element type of C matrix + typename ElementC_, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC_, + /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) + typename Policy_, + /// Reduce operand A or B along K dimension + bool ReduceKForA_, + /// Number of partitions along K dimension + int PartitionsK_ = 1, + /// Store the accumulators in row major or column major. Row major is used + /// when output layout is interleaved. 
+ bool AccumulatorsInRowMajor = false, + /// Used for partial specialization + typename Enable = bool +> +class MmaWithReductionTensorOp { +public: + /// Shape of warp-level matrix operation (concept: GemmShape) + using Shape = Shape_; + + /// Data type of multiplicand A + using ElementA = ElementA_; + + /// Layout of multiplicand A + using LayoutA = LayoutA_; + + /// Data type of multiplicand B + using ElementB = ElementB_; + + /// Layout of multiplicand B + using LayoutB = LayoutB_; + + /// Data type of accumulator matrix C + using ElementC = ElementC_; + + /// Layout of accumulator matrix C + using LayoutC = LayoutC_; + + /// Shape of the warp in units of thread (concept: MmaLanePolicySimt) + using Policy = Policy_; + + /// Underlying matrix multiply operator (concept: arch::Mma) + using ArchMmaOperator = typename Policy::Operator; + + /// Indicates math operator + using MathOperator = typename ArchMmaOperator::Operator; + + /// Architecture tag from underlying instruction + using ArchTag = typename ArchMmaOperator::ArchTag; + + /// Indicates class of matrix operator + using OperatorClass = arch::OpClassTensorOp; + + /// Shape of underlying instruction + using InstructionShape = typename ArchMmaOperator::Shape; + + /// Complex transform on A operand + static ComplexTransform const kTransformA = ComplexTransform::kNone; + + /// Complex transform on B operand + static ComplexTransform const kTransformB = ComplexTransform::kNone; + + /// Number of threads participating in warp-level matrix product + static int const kThreadCount = 32; + + /// Number of partitions along K dimension + static int const kPartitionsK = PartitionsK_; + + static bool const kReduceKForA = ReduceKForA_; + + static_assert(platform::is_same::value || + platform::is_same::value, + "ElementA needs to be fp16 or bf16."); + + static_assert(platform::is_same::value || + platform::is_same::value, + "ElementB needs to be fp16 or bf16."); + + static_assert(platform::is_same>::value, + "Only supports 16x8x16 tensor core instruction."); + + static_assert(!AccumulatorsInRowMajor, + "Only calls tensor core instructions in column major."); + +public: + + /// Iterates over the A operand in memory + using IteratorA = MmaTensorOpMultiplicandTileIterator< + MatrixShape, Operand::kA, ElementA, LayoutA, + MatrixShape, + Policy::OpDelta::kRow, kThreadCount, kPartitionsK>; + + /// Storage for A tile + using FragmentA = typename IteratorA::Fragment; + + /// Storage for transformed A tile + using TransformedFragmentA = + Array; + + /// Iterates over the B operand in memory + using IteratorB = MmaTensorOpMultiplicandTileIterator< + MatrixShape, Operand::kB, ElementB, LayoutB, + MatrixShape, + Policy::OpDelta::kRow, kThreadCount, kPartitionsK>; + + /// Storage for B tile + using FragmentB = typename IteratorB::Fragment; + + /// Storage for transformed B tile + using TransformedFragmentB = + Array; + + /// Iterates over the C operand in memory + using IteratorC = MmaTensorOpAccumulatorTileIterator< + MatrixShape, ElementC, LayoutC, + typename ArchMmaOperator::Shape, typename Policy::OpDelta>; + + /// Storage for C tile + using FragmentC = typename IteratorC::Fragment; + + /// Number of mma operations performed + using MmaIterations = MatrixShape< + (Shape::kM + ArchMmaOperator::Shape::kM - 1) / ArchMmaOperator::Shape::kM, + (Shape::kN + ArchMmaOperator::Shape::kN - 1) / ArchMmaOperator::Shape::kN + >; + + using FragmentReduction = Array; + +public: + + /// Underlying matrix multiply operator (concept: arch::Mma) + ArchMmaOperator mma; + 
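+  // Editorial note (not part of the original CUTLASS source): MmaIterations
+  // above is a ceiling division of the warp-level tile by the instruction
+  // shape. As a worked example, for a hypothetical warp tile
+  // Shape = GemmShape<64, 64, 16> and the 16x8x16 instruction required by the
+  // static_asserts above, MmaIterations = MatrixShape<(64 + 15) / 16, (64 + 7) / 8>
+  // = MatrixShape<4, 8>, i.e. 32 mma instructions per warp-level
+  // multiply-accumulate call.
+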
+public: + + // + // Methods + // + + /// Ctor + CUTLASS_DEVICE + MmaWithReductionTensorOp() {} + + /// Performs a warp-level matrix multiply-accumulate operation + CUTLASS_DEVICE + void operator()( + FragmentC &D, + TransformedFragmentA const &A, + TransformedFragmentB const &B, + FragmentC const &C, + FragmentReduction &gemm_k_reduction + ) const { + + using MmaOperandA = typename ArchMmaOperator::FragmentA; + using MmaOperandB = typename ArchMmaOperator::FragmentB; + using MmaOperandC = typename ArchMmaOperator::FragmentC; + + D = C; + + [[maybe_unused]] MmaOperandA const *ptr_A = reinterpret_cast(&A); + [[maybe_unused]] MmaOperandB const *ptr_B = reinterpret_cast(&B); + [[maybe_unused]] MmaOperandC *ptr_D = reinterpret_cast(&D); + + #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 800) + assert(0); + #elif defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800) + // Serpentine visitation order maximizing reuse of Ra + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < MmaIterations::kRow; ++m) { + + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < MmaIterations::kColumn; ++n) { + + int n_serpentine = ((m % 2) ? (MmaIterations::kColumn - 1 - n) : n); + + mma(ptr_D[m + n_serpentine * MmaIterations::kRow], + ptr_A[m], + ptr_B[n_serpentine], + ptr_D[m + n_serpentine * MmaIterations::kRow]); + + if (!kReduceKForA && m == 0) { + #if 0 + gemm_k_reduction[n_serpentine] += float(B[n_serpentine * 4]); + gemm_k_reduction[n_serpentine] += float(B[n_serpentine * 4 + 1]); + gemm_k_reduction[n_serpentine] += float(B[n_serpentine * 4 + 2]); + gemm_k_reduction[n_serpentine] += float(B[n_serpentine * 4 + 3]); + #else + uint32_t const *tmp = reinterpret_cast(&B); + + if (platform::is_same::value) { + asm volatile( + "{\n\t" + " .reg .f16 low, high;\n\t" + " .reg .f32 tmp;\n\t" + " mov.b32 {low, high}, %1;\n\t" + " cvt.f32.f16 tmp, low;\n\t" + " add.f32 %0, tmp, %0;\n\t" + " cvt.f32.f16 tmp, high;\n\t" + " add.f32 %0, tmp, %0;\n\t" + " mov.b32 {low, high}, %2;\n\t" + " cvt.f32.f16 tmp, low;\n\t" + " add.f32 %0, tmp, %0;\n\t" + " cvt.f32.f16 tmp, high;\n\t" + " add.f32 %0, tmp, %0;\n\t" + "}\n\t" + : "+f"(gemm_k_reduction[n_serpentine]) + : "r"(tmp[n_serpentine * 2]), "r"(tmp[n_serpentine * 2 + 1])); + } else if (platform::is_same::value) { + asm volatile( + "{\n\t" + " .reg .f32 tmp;\n\t" + " shl.b32 tmp, %1, 16;\n\t" + " add.f32 %0, tmp, %0;\n\t" + " and.b32 tmp, %1, 0xffff0000;\n\t" + " add.f32 %0, tmp, %0;\n\t" + " shl.b32 tmp, %2, 16;\n\t" + " add.f32 %0, tmp, %0;\n\t" + " and.b32 tmp, %2, 0xffff0000;\n\t" + " add.f32 %0, tmp, %0;\n\t" + "}\n\t" + : "+f"(gemm_k_reduction[n_serpentine]) + : "r"(tmp[n_serpentine * 2]), "r"(tmp[n_serpentine * 2 + 1])); + } else { + assert(0); + } + #endif + } + + if (kReduceKForA && (n == 0)) { + #if 0 + gemm_k_reduction[m * 2] += float(A[m * 8]); + gemm_k_reduction[m * 2] += float(A[m * 8 + 1]); + gemm_k_reduction[m * 2] += float(A[m * 8 + 4]); + gemm_k_reduction[m * 2] += float(A[m * 8 + 5]); + + gemm_k_reduction[m * 2 + 1] += float(A[m * 8 + 2]); + gemm_k_reduction[m * 2 + 1] += float(A[m * 8 + 3]); + gemm_k_reduction[m * 2 + 1] += float(A[m * 8 + 6]); + gemm_k_reduction[m * 2 + 1] += float(A[m * 8 + 7]); + #else + uint32_t const *tmp = reinterpret_cast(&A); + + if (platform::is_same::value) { + asm volatile( + "{\n\t" + " .reg .f16 low, high;\n\t" + " .reg .f32 tmp;\n\t" + " mov.b32 {low, high}, %2;\n\t" + " cvt.f32.f16 tmp, low;\n\t" + " add.f32 %0, tmp, %0;\n\t" + " cvt.f32.f16 tmp, high;\n\t" + " add.f32 %0, tmp, %0;\n\t" + " mov.b32 {low, high}, %3;\n\t" + " cvt.f32.f16 tmp, 
low;\n\t" + " add.f32 %1, tmp, %1;\n\t" + " cvt.f32.f16 tmp, high;\n\t" + " add.f32 %1, tmp, %1;\n\t" + " mov.b32 {low, high}, %4;\n\t" + " cvt.f32.f16 tmp, low;\n\t" + " add.f32 %0, tmp, %0;\n\t" + " cvt.f32.f16 tmp, high;\n\t" + " add.f32 %0, tmp, %0;\n\t" + " mov.b32 {low, high}, %5;\n\t" + " cvt.f32.f16 tmp, low;\n\t" + " add.f32 %1, tmp, %1;\n\t" + " cvt.f32.f16 tmp, high;\n\t" + " add.f32 %1, tmp, %1;\n\t" + "}\n\t" + : "+f"(gemm_k_reduction[m * 2]), "+f"(gemm_k_reduction[m * 2 + 1]) + : "r"(tmp[m * 4]), "r"(tmp[m * 4 + 1]),"r"(tmp[m * 4 + 2]), "r"(tmp[m * 4 + 3])); + + } else if (platform::is_same::value) { + + asm volatile( + "{\n\t" + " .reg .f32 tmp;\n\t" + " shl.b32 tmp, %2, 16;\n\t" + " add.f32 %0, tmp, %0;\n\t" + " and.b32 tmp, %2, 0xffff0000;\n\t" + " add.f32 %0, tmp, %0;\n\t" + " shl.b32 tmp, %3, 16;\n\t" + " add.f32 %1, tmp, %1;\n\t" + " and.b32 tmp, %3, 0xffff0000;\n\t" + " add.f32 %1, tmp, %1;\n\t" + " shl.b32 tmp, %4, 16;\n\t" + " add.f32 %0, tmp, %0;\n\t" + " and.b32 tmp, %4, 0xffff0000;\n\t" + " add.f32 %0, tmp, %0;\n\t" + " shl.b32 tmp, %5, 16;\n\t" + " add.f32 %1, tmp, %1;\n\t" + " and.b32 tmp, %5, 0xffff0000;\n\t" + " add.f32 %1, tmp, %1;\n\t" + "}\n\t" + : "+f"(gemm_k_reduction[m * 2]), "+f"(gemm_k_reduction[m * 2 + 1]) + : "r"(tmp[m * 4]), "r"(tmp[m * 4 + 1]),"r"(tmp[m * 4 + 2]), "r"(tmp[m * 4 + 3])); + + } else { + assert(0); + } + #endif + } + } + } + #else + assert(0); + #endif + } + + /// Transform the mma operands to the required types + CUTLASS_DEVICE + void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B, + FragmentA const &A, FragmentB const &B) const { + + // + // Define conversions from source type to instruction type + // + FloatRoundStyle const kRoundA = + PreferredRoundingMode::kRound; + FloatRoundStyle const kRoundB = + PreferredRoundingMode::kRound; + #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 800) + detail::ConvertAndPack + convert_A; + NumericArrayConverter + convert_B; + Array const *ptr_B = + reinterpret_cast const *>(&B); + Array * + ptr_dst_B = reinterpret_cast *>(&dst_B); + + dst_A = convert_A(A); + + ptr_dst_B[0] = convert_B(ptr_B[0]); + ptr_dst_B[1] = convert_B(ptr_B[1]); + + #elif defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800) + detail::ConvertAndPack + convert_A; + NumericArrayConverter + convert_B; + Array const *ptr_A = + reinterpret_cast const *>(&A); + Array * + ptr_dst_A = reinterpret_cast *>(&dst_A); + + dst_B = convert_B(B); + + ptr_dst_A[0] = convert_A(ptr_A[0]); + ptr_dst_A[1] = convert_A(ptr_A[1]); + #else + assert(0); + #endif + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace warp +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/scale_bias_tile_iterator.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/scale_bias_tile_iterator.h new file mode 100644 index 0000000000000000000000000000000000000000..aebeea7900875d1f6ab18a7521cf1a4dd19663ba --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/scale_bias_tile_iterator.h @@ -0,0 +1,572 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief Defines iterators used by warp-level loading scale and bias vectors. + Every scale/bias data only needs to be loaded once for every channel. +*/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/array.h" +#include "cutlass/numeric_types.h" +#include "cutlass/tensor_ref.h" +#include "cutlass/matrix_shape.h" + +#include "cutlass/arch/memory_sm75.h" +#include "cutlass/gemm/gemm.h" + +#include "cutlass/layout/matrix.h" +#include "cutlass/layout/tensor.h" +#include "cutlass/layout/pitch_linear.h" +#include "cutlass/layout/tensor_op_multiplicand_sm75.h" + +#include "cutlass/platform/platform.h" +#include "cutlass/fast_math.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace warp { + +//////////////////////////////////////////////////////////////////////////////// + +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Data type of A elements + typename Element_, + /// Layout of operand + typename Layout_, + /// Shape of one matrix production operation (concept: GemmShape) + typename InstructionShape_, + /// Policy of the details of LDSM shape and iterations + typename Policy_, + /// Number of threads participating in one matrix operation + int Threads, + /// Number of partitions along K dimension + int PartitionsK_ = 1> +class ScaleBiasTileIterator; + +//////////////////////////////////////////////////////////////////////////////// + +/// This tile iterator is specialized for 32-thread TensorOps. It uses LDSM to +/// load from shared memory and therefore must be initialized with a TensorRef +/// to shared memory. 
+/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: PitchLinearShape) + typename Shape_, + /// Data type of elements + typename Element_, + /// Shape of one matrix product operation (concept: PitchLinearShape) + typename InstructionShape_, + /// Policy of the details of LDSM shape and iterations + typename Policy_, + /// Number of partitions along K dimension + int PartitionsK_> +class ScaleBiasTileIterator { + public: + /// Shape of tile to load (concept: PitchLinearShape) + using Shape = Shape_; + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = cutlass::layout::PitchLinear; + + /// Shape of one matrix product operation (concept: GemmShape) + using InstructionShape = InstructionShape_; + + /// Number of participating threads + static int const kThreads = 32; + + /// Number of partitions along K dimension + static int const kPartitionsK = PartitionsK_; + + /// Number of partitions along K dimension + static int const kElementsPerAccess = 128 / sizeof_bits::value; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Internal structure of iterator - made public to enable introspection + using Policy = Policy_; + + private: + + /// Pointer type used for accesses + using AccessType = Array; + + public: + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = Array; + + private: + + /// Shared memory base pointers - not advanced + AccessType const *pointer_; + + /// Byte offset incremented as iterator advances + Index byte_offset_; + + /// Internal counter used to determine when to increment byte offset and when + /// to XOR it + int k_group_idx_; + + public: + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + ScaleBiasTileIterator() + : pointer_(nullptr), + byte_offset_(0), + k_group_idx_(0) {} + + /// Constructor from TensorRef + CUTLASS_DEVICE + ScaleBiasTileIterator(TensorRef const &ref_scale_bias, + int lane_id) + : byte_offset_(0), k_group_idx_(0) { + /// 16816 only + pointer_ = reinterpret_cast(ref_scale_bias.data()) + + ((lane_id >> 3) & 1) * Shape::kContiguous / kElementsPerAccess + + (lane_id >> 4); + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_DEVICE + ScaleBiasTileIterator &add_pointer_offset(LongIndex offset) { + byte_offset_ += offset * sizeof_bits::value / 8; + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole + /// tiles + CUTLASS_DEVICE + ScaleBiasTileIterator &add_tile_offset( + TensorCoord const &tile_offset) { + int whole_tiles = tile_offset.contiguous() / Policy::kGroupsPerTile; + int k_groups_delta = tile_offset.contiguous() % Policy::kGroupsPerTile; + + byte_offset_ += k_groups_delta * sizeof_bits::value * + kElementsPerAccess * Policy::LdsmShape::kContiguous / 8; + + // Multiply by 2 because scale and bias belonging to the same stage are next + // to each other in the shared memory. 
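+    // Editorial note (not part of the original CUTLASS source): as a worked
+    // example, assume Element = half_t, so kElementsPerAccess = 128 / 16 = 8,
+    // and a hypothetical Shape::kContiguous = 64. Advancing by one whole tile
+    // then moves pointer_ by 2 * 64 / 8 = 16 AccessType elements: one
+    // Shape::kContiguous span of scale values followed by one span of bias
+    // values for that stage.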
+ pointer_ += (2 * whole_tiles * Shape::kContiguous / kElementsPerAccess); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_DEVICE + ScaleBiasTileIterator &operator++() { + byte_offset_ += Policy::LdsmShape::kContiguous * + sizeof_bits::value * kElementsPerAccess / 8; + + k_group_idx_++; + + if (k_group_idx_ == (Policy::kGroupsPerTile / kPartitionsK)) { + k_group_idx_ = 0; + byte_offset_ -= (Policy::kGroupsPerTile / kPartitionsK) * + Policy::LdsmShape::kContiguous * + sizeof_bits::value * kElementsPerAccess / 8; + add_tile_offset({Policy::kGroupsPerTile, 0}); + } + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + ScaleBiasTileIterator &operator--() { assert(0); } + + ///< advances in units of whole tiles along the logical coordinate space of + ///< the tensor + CUTLASS_DEVICE + ScaleBiasTileIterator &operator+=( + TensorCoord const &tile_offset) { + add_tile_offset(tile_offset); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of + ///< the tensor + CUTLASS_DEVICE + ScaleBiasTileIterator &operator-=( + TensorCoord const &tile_offset) { + add_tile_offset(-tile_offset); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { load_with_byte_offset(frag, 0); } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset in units of bytes + Index byte_offset) const { + Array *fetch_ptr = + reinterpret_cast *>(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < 1; ++s) { + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < Policy::LdsmIterations::kContiguous; ++c) { + int access_idx = c + s * Policy::LdsmIterations::kContiguous; + + AccessType const *source_ptr = + pointer_ + Policy::LdsmShape::kContiguous * c; + + char const *source_byte_ptr = + reinterpret_cast(source_ptr) + byte_offset + + byte_offset_; + + cutlass::arch::ldsm( + fetch_ptr[access_idx], source_byte_ptr); + } + } + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index pointer_offset) const { + load_with_byte_offset(frag, pointer_offset * sizeof(Element)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset) const { + load_with_byte_offset(frag, tile_offset, 0); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. 
+ CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index byte_offset) const { + Index pointer_offset = tile_offset.contiguous() * + InstructionShape::kContiguous / + kElementsPerAccess; + + byte_offset += sizeof_bits::value * pointer_offset / 8; + + load_with_byte_offset(frag, byte_offset); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. + CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + k_group_idx_ = k_group % (Policy::kGroupsPerTile / kPartitionsK); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// This tile iterator is specialized for 32-thread TensorOps. It uses LDSM to +/// load from shared memory and therefore must be initialized with a TensorRef +/// to shared memory. +/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Data type of elements + typename Element_, + /// Shape of one matrix product operation (concept: MatrixShape) + typename InstructionShape_, + /// Policy of the details of LDSM shape and iterations + typename Policy_, + /// Number of partitions along K dimension + int PartitionsK_> +class ScaleBiasTileIterator { + public: + /// Shape of tile to load (concept: PitchLinearShape) + using Shape = Shape_; + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = cutlass::layout::RowMajor; + + /// Shape of one matrix product operation (concept: MatrixShape) + using InstructionShape = InstructionShape_; + + /// Number of participating threads + static int const kThreads = 32; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Internal structure of iterator - made public to enable introspection + using Policy = Policy_; + + /// Underlying tile iterator implementation + using Base = ScaleBiasTileIterator< + layout::PitchLinearShape, Element, + layout::PitchLinear, + layout::PitchLinearShape, + Policy, kThreads, PartitionsK_>; + + public: + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = typename Base::Fragment; + + private: + /// Underlying tile iterator + Base iterator_; + + public: + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + ScaleBiasTileIterator() {} + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + ScaleBiasTileIterator(TensorRef const &ref_scale_bias, int lane_id) + : iterator_({ref_scale_bias.data(), ref_scale_bias.stride()}, lane_id) {} + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_HOST_DEVICE + ScaleBiasTileIterator &add_pointer_offset(LongIndex offset) { + iterator_.add_pointer_offset(offset); + + return *this; + } + + /// Advances an 
iterator along logical dimensions of matrix in units of whole + /// tiles + CUTLASS_HOST_DEVICE + ScaleBiasTileIterator &add_tile_offset( + TensorCoord const &tile_offset) { + iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()}); + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole + /// tiles + CUTLASS_DEVICE + ScaleBiasTileIterator &add_tile_offset_negative( + TensorCoord const &tile_offset) { + iterator_.add_tile_offset_negative({tile_offset.column(), tile_offset.row()}); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + ScaleBiasTileIterator &operator++() { + ++iterator_; + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + ScaleBiasTileIterator &operator--() { + --iterator_; + + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of + ///< the tensor + CUTLASS_DEVICE + ScaleBiasTileIterator &operator+=( + TensorCoord const &tile_offset) { + add_tile_offset(PitchLinearCoord(tile_offset.column(), tile_offset.row())); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of + ///< the tensor + CUTLASS_DEVICE + ScaleBiasTileIterator &operator-=( + TensorCoord const &tile_offset) { + add_tile_offset(-PitchLinearCoord(tile_offset.column(), tile_offset.row())); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { iterator_.load(frag); } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index pointer_offset) const { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index byte_offset) const { + iterator_.load_with_byte_offset(frag, byte_offset); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset) const { + assert(0); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + assert(0); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index byte_offset) const { + iterator_.load_with_byte_offset( + frag, {tile_offset.strided(), tile_offset.contiguous()}, byte_offset); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. 
Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. + CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + iterator_.set_kgroup_index(k_group); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace warp +} // namespace gemm +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/softmax_scale_bias_transform.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/softmax_scale_bias_transform.h new file mode 100644 index 0000000000000000000000000000000000000000..bf8efe9f8a87a8431f41131e10b42bdeaf57086f --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/softmax_scale_bias_transform.h @@ -0,0 +1,117 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Templates implementing warp-level per-channel softmax before + matrix multiply-accumulate operations targeting Tensor Cores. 
+*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/array.h" +#include "cutlass/platform/platform.h" + +#include "cutlass/numeric_conversion.h" +#include "cutlass/numeric_types.h" +#include "cutlass/matrix_shape.h" + +#include "cutlass/arch/memory_sm75.h" +#include "cutlass/arch/mma_sm75.h" +#include "cutlass/arch/mma_sm80.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/warp/mma.h" + +#include "cutlass/gemm/warp/mma_tensor_op_policy.h" + +#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h" +#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace warp { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +struct SoftmaxScaleBiasTransform { + + using T = typename FragmentActivations::Element; + + static int const NumActivations = FragmentActivations::kElements; + static int const NumNormSum = FragmentNormSum::kElements; + static int const MmaElements = 2; + // One element has one scale and one bias + static int const MmaScaleBiasPair = 2; + // 16816 has 2 columns and 2 rows + static int const MmaCols = 2; + static int const MmaRows = 2; + + using MmaOperand = Array; + using NormSumOperand = Array<__half2, MmaScaleBiasPair>; + + CUTLASS_DEVICE + void transform(MmaOperand &activations, + NormSumOperand const &norm_sum) { + + __half2* packed_activations = reinterpret_cast<__half2*>(&activations); + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < MmaElements / 2; ++i) { + __half2 out = ::h2exp(__hsub2(packed_activations[i], norm_sum[2*i])); + packed_activations[i] = __hmul2(out, norm_sum[2*i + 1]); + } + } + + CUTLASS_DEVICE + void operator()(FragmentActivations &activations, + FragmentNormSum const &norm_sum) { + MmaOperand *ptr_activations = reinterpret_cast(&activations); + NormSumOperand const *ptr_norm_sum = + reinterpret_cast(&norm_sum); + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < (NumActivations / MmaElements); ++i) { + transform(ptr_activations[i], + ptr_norm_sum[i / (MmaCols * MmaRows) * MmaRows + i % MmaRows]); + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace warp +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/tile_iterator_planar_complex.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/tile_iterator_planar_complex.h new file mode 100644 index 0000000000000000000000000000000000000000..1633dd2bc5b1d03afbdf87f35ef58f9074b6f472 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/gemm/warp/tile_iterator_planar_complex.h @@ -0,0 +1,250 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Templates implementing warp-level matrix multiply-accumulate operations. +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/array.h" +#include "cutlass/numeric_types.h" +#include "cutlass/matrix_shape.h" +#include "cutlass/gemm/gemm.h" + +#include "cutlass/array_planar_complex.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace warp { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +class TileIteratorPlanarComplex { +public: + + /// Underlying iterator over real-valued tiles + using TileIterator = TileIterator_; + + /// Underlying element type + using Element = typename TileIterator::Element; + + /// Underlying layout type + using Layout = typename TileIterator::Layout; + + /// TensorRef type for loading element from a tensor + using TensorRef = typename TileIterator::TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Planar complex fragment + using Fragment = ArrayPlanarComplex; + +public: + + /// Underlying tile iterator + TileIterator tile_iterator_; + + /// Offset (in units of bytes) to the imaginary part of the planar complex matrix + LongIndex imaginary_offset_; + +public: + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + TileIteratorPlanarComplex(): imaginary_offset_(0) { } + + /// Constructor from TensorRef + CUTLASS_DEVICE + TileIteratorPlanarComplex( + TensorRef const &ref, + int lane_id, + LongIndex imaginary_offset + ): + tile_iterator_(ref, lane_id), + imaginary_offset_((imaginary_offset * sizeof_bits::value) / 8) { } + + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_DEVICE + TileIteratorPlanarComplex &add_pointer_offset(LongIndex offset) { + + tile_iterator_.add_pointer_offset(offset); + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + 
CUTLASS_HOST_DEVICE + TileIteratorPlanarComplex &add_tile_offset(TensorCoord const &tile_offset) { + + tile_iterator_.add_tile_offset(tile_offset); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_DEVICE + TileIteratorPlanarComplex & operator++() { + ++tile_iterator_; + return *this; + } + + // + // WIP + // + + /// Advances the iterator along the opposite of the advance dimension + CUTLASS_HOST_DEVICE + TileIteratorPlanarComplex & operator--() { + --tile_iterator_; + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + TileIteratorPlanarComplex & operator+=(TensorCoord const &tile_offset) { + tile_iterator_.add_tile_offset(tile_offset); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + TileIteratorPlanarComplex & operator-=(TensorCoord const &tile_offset) { + tile_iterator_.add_tile_offset(-tile_offset); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + + tile_iterator_.load_with_byte_offset(frag.real, 0); + tile_iterator_.load_with_byte_offset(frag.imag, imaginary_offset_); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset in units of bytes + Index byte_offset) const { + + tile_iterator_.load_with_byte_offset(frag.real, byte_offset); + tile_iterator_.load_with_byte_offset(frag.imag, byte_offset + imaginary_offset_); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index pointer_offset) const { + + Index byte_offset = (pointer_offset * sizeof_bits::value)/8; + + tile_iterator_.load_with_byte_offset(frag.real, byte_offset); + tile_iterator_.load_with_byte_offset(frag.imag, byte_offset + imaginary_offset_); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset) const { + + tile_iterator_.load_with_byte_offset(frag.real, tile_offset, 0); + tile_iterator_.load_with_byte_offset(frag.imag, tile_offset, imaginary_offset_); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + + Index byte_offset = (pointer_offset * sizeof_bits::value)/8; + + tile_iterator_.load_with_byte_offset(frag.real, tile_offset, byte_offset); + tile_iterator_.load_with_byte_offset(frag.real, tile_offset, byte_offset + imaginary_offset_); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. 
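+  ///
+  /// Editorial note (not part of the original CUTLASS source): the planar-complex
+  /// loads in this iterator populate the fragment with two loads on the underlying
+  /// real-valued tile iterator: frag.real at the requested offset and frag.imag at
+  /// the same offset plus imaginary_offset_, which the constructor stores in bytes.
+  /// For example, assuming Element = half_t and a hypothetical imaginary-part
+  /// offset of 4096 elements, imaginary_offset_ = 4096 * 16 / 8 = 8192 bytes.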
+ CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index byte_offset) const { + + tile_iterator_.load_with_byte_offset(frag.real, tile_offset, byte_offset); + tile_iterator_.load_with_byte_offset(frag.imag, tile_offset, byte_offset + imaginary_offset_); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. + CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + tile_iterator_.set_kgroup_index(k_group); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace warp +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/layout/layout.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/layout/layout.h new file mode 100644 index 0000000000000000000000000000000000000000..6f638eb996b7d0b9c42213749c4b4ed8f5f6a0e4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/layout/layout.h @@ -0,0 +1,64 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Defines layout functions used by TensorRef and derived classes. + + Layout functions map logical coordinates to linear memory. They often require additional + data to describe strides between elements. + + Layout functions must implement all members in the public interface of IdentityTensorLayout<> + defined in cutlass/tensor_ref.h. +*/ +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/layout/matrix.h" +#include "cutlass/layout/pitch_linear.h" +#include "cutlass/layout/tensor.h" +#include "cutlass/layout/vector.h" + +#include "cutlass/layout/tensor_op_multiplicand_sm70.h" +#include "cutlass/layout/tensor_op_multiplicand_sm75.h" +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace layout { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace layout +} // namespace cutlass + +/////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/layout/matrix.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/layout/matrix.h new file mode 100644 index 0000000000000000000000000000000000000000..f0b4543fa1d27de9078fd78ae688b834820c15fc --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/layout/matrix.h @@ -0,0 +1,1359 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Defines layout functions used by TensorRef and derived classes. + + Layout functions map logical coordinates to linear memory. They often require additional + data to describe strides between elements. + + Layout functions must implement all members in the public interface of IdentityTensorLayout<> + defined in cutlass/tensor_ref.h. +*/ + +/* + Note: CUTLASS 3x increases the host compiler requirements to C++17. However, certain + existing integrations of CUTLASS require C++11 host compilers. + + Until this requirement can be lifted, certain headers with this annotation are required + to be remain consistent with C++11 syntax. + + C++11 compatibility is enforced by this unit test: `cutlass_test_unit_core_cpp11`. +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/fast_math.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/pitch_linear_coord.h" + +namespace cutlass { +namespace layout { + +///////////////////////////////////////////////////////////////////////////////////////////////// +// +// Defines data layouts of various matrix formats usable by TensorRef and other classes. +// +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Mapping function for row-major matrices. +class RowMajor { +public: + /// Logical rank of tensor + static int const kRank = 2; + + /// Rank of stride vector + static int const kStrideRank = 1; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = MatrixCoord; + + /// Stride vector + using Stride = Coord; + +private: + // + // Data members + // + + /// Stride data member + Stride stride_; + +public: + // + // Methods + // + + /// Constructor + CUTLASS_HOST_DEVICE + RowMajor(LongIndex ldm = 0): stride_(ldm) { } + + /// Ctor + CUTLASS_HOST_DEVICE + RowMajor(Stride stride): stride_(stride) { } + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static RowMajor packed(MatrixCoord const &extent) { + return RowMajor(extent.column()); + } + + /// Returns the offset of a coordinate in linear memory. + /// Assumes coordinate has convention (row, column) + CUTLASS_HOST_DEVICE + LongIndex operator()(MatrixCoord const &coord) const { + return LongIndex(coord.row()) * LongIndex(stride_[0]) + coord.column(); + } + + /// Inverse of layout function, mapping linear offset to logical coordinate + CUTLASS_HOST_DEVICE + MatrixCoord inverse(LongIndex offset) const { + return MatrixCoord(Index(offset / stride_[0]), Index(offset % stride_[0])); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { + return stride_; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride & stride() { + return stride_; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + typename Stride::Index stride(int idx) const { + return stride_[idx]; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + typename Stride::Index & stride(int idx) { + return stride_[idx]; + } + + /// Compute the number of contiguous elements needed to store a tensor with the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(MatrixCoord const &extent) const { + return LongIndex(extent.row()) * LongIndex(stride_[0]); + } +}; + +/// Mapping function for column-major matrices. 
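// Editor's note: the following is an illustrative host-side sketch (not part of the CUTLASS
// source) showing how RowMajor maps a logical (row, column) coordinate to a linear offset;
// it assumes the CUTLASS headers are available on the include path.
#include "cutlass/layout/matrix.h"
#include <cassert>

inline void row_major_layout_example() {
  // A tightly packed 3x5 row-major matrix has leading dimension 5.
  cutlass::layout::RowMajor layout =
      cutlass::layout::RowMajor::packed(cutlass::MatrixCoord(3, 5));
  // offset(row, column) = row * ldm + column
  assert(layout(cutlass::MatrixCoord(1, 2)) == 1 * 5 + 2);      // == 7
  // Number of elements needed to store the whole matrix.
  assert(layout.capacity(cutlass::MatrixCoord(3, 5)) == 15);
}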
+class ColumnMajor { +public: + /// Logical rank of tensor + static int const kRank = 2; + + /// Rank of stride vector + static int const kStrideRank = 1; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = MatrixCoord; + + /// Stride vector + using Stride = Coord; + +private: + // + // Data members + // + + /// Stride data member + Stride stride_; + +public: + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + ColumnMajor(LongIndex ldm = 0): stride_(ldm) { } + + /// Ctor + CUTLASS_HOST_DEVICE + ColumnMajor(Stride stride): stride_(stride) { } + + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static ColumnMajor packed(MatrixCoord const &extent) { + return ColumnMajor(extent.row()); + } + + /// Returns the offset of a coordinate in linear memory. + /// Assumes coordinate has convention (row, column) + CUTLASS_HOST_DEVICE + LongIndex operator()(MatrixCoord const &coord) const { + return LongIndex(coord.column()) * LongIndex(stride_[0]) + coord.row(); + } + + /// Inverse of layout function, mapping linear offset to logical coordinate + CUTLASS_HOST_DEVICE + MatrixCoord inverse(LongIndex offset) const { + return MatrixCoord(Index(offset % stride_[0]), Index(offset / stride_[0])); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { + return stride_; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride & stride() { + return stride_; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + typename Stride::Index stride(int idx) const { + return stride_[idx]; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + typename Stride::Index & stride(int idx) { + return stride_[idx]; + } + + /// Compute the number of contiguous elements needed to store a tensor with the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(MatrixCoord const &extent) const { + return LongIndex(extent.column()) * LongIndex(stride_[0]); + } +}; + +/// Mapping function for interleaved matrices. Matrix is structured +/// as row-major arrangement of fixed-size columns. +template +struct RowMajorInterleaved { + + /// Logical rank of tensor + static int const kRank = 2; + + /// Rank of stride vector + static int const kStrideRank = 1; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = MatrixCoord; + + /// Stride vector + using Stride = Coord; + + /// Size of interleaved columns + static int const kInterleave = Interleave; + +private: + // + // Data members + // + + /// Stride data member + Stride stride_; + +public: + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + RowMajorInterleaved(LongIndex ldm = 0): stride_(ldm) { } + + /// Ctor + CUTLASS_HOST_DEVICE + RowMajorInterleaved(Stride stride): stride_(stride) { } + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static RowMajorInterleaved packed(MatrixCoord const &extent) { + return RowMajorInterleaved(extent.column() * kInterleave); + } + + /// Returns the offset of a coordinate in linear memory. 
+ /// Assumes coordinate has convention (row, column) + CUTLASS_HOST_DEVICE + LongIndex operator()(MatrixCoord const &coord) const { + Index row_major = coord.row() / kInterleave; + Index row_minor = coord.row() % kInterleave; + return LongIndex(row_major) * LongIndex(stride_[0]) + LongIndex(coord.column()) * kInterleave + row_minor; + } + + /// Inverse of layout function, mapping linear offset to logical coordinate + CUTLASS_HOST_DEVICE + MatrixCoord inverse(LongIndex offset) const { + + Index row_major = Index(offset / stride_[0]); + Index residual = Index(offset % stride_[0]); + + Index column = residual / kInterleave; + Index row_minor = residual % kInterleave; + + return MatrixCoord(row_major * kInterleave + row_minor, column); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { + return stride_; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride & stride() { + return stride_; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + typename Stride::Index stride(int idx) const { + return stride_[idx]; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + typename Stride::Index & stride(int idx) { + return stride_[idx]; + } + + /// Compute the number of contiguous elements needed to store a tensor with the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(MatrixCoord const &extent) const { + return (extent.row() + kInterleave - 1) / kInterleave * stride_[0]; + } +}; + +/// Mapping function for interleaved matrices. Matrix is structured +/// as column-major arrangement of fixed-size rows. +template +struct ColumnMajorInterleaved { + + /// Logical rank of tensor + static int const kRank = 2; + + /// Rank of stride vector + static int const kStrideRank = 1; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = MatrixCoord; + + /// Stride vector + using Stride = Coord; + + /// Size of interleaved columns + static int const kInterleave = Interleave; + +private: + // + // Data members + // + + /// Stride data member + Stride stride_; + +public: + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + ColumnMajorInterleaved(LongIndex ldm = 0): stride_(ldm) { } + + /// Ctor + CUTLASS_HOST_DEVICE + ColumnMajorInterleaved(Stride stride): stride_(stride) { } + + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static ColumnMajorInterleaved packed(MatrixCoord const &extent) { + return ColumnMajorInterleaved(extent.row() * kInterleave); + } + + /// Returns the offset of a coordinate in linear memory. 
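// Editor's note (illustrative, not part of the CUTLASS source): for RowMajorInterleaved with
// Interleave = 4, packed(MatrixCoord(8, 2)) produces stride 2 * 4 = 8, and coordinate
// (row = 5, column = 1) maps to
//   (5 / 4) * 8 + 1 * 4 + (5 % 4) = 8 + 4 + 1 = 13,
// i.e. groups of four consecutive rows of a column are stored contiguously before the layout
// steps to the next column.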
+ /// Assumes coordinate has convention (row, column) + CUTLASS_HOST_DEVICE + LongIndex operator()(MatrixCoord const &coord) const { + Index column_major = coord.column() / kInterleave; + Index column_minor = coord.column() % kInterleave; + return LongIndex(column_major) * LongIndex(stride_[0]) + LongIndex(coord.row()) * kInterleave + column_minor; + } + + /// Inverse of layout function, mapping linear offset to logical coordinate + CUTLASS_HOST_DEVICE + MatrixCoord inverse(LongIndex offset) const { + + Index column_major = Index(offset / stride_[0]); + Index residual = Index(offset % stride_[0]); + + Index row = residual / kInterleave; + Index column_minor = residual % kInterleave; + + return MatrixCoord(row, column_major * kInterleave + column_minor); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { + return stride_; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride & stride() { + return stride_; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + typename Stride::Index stride(int idx) const { + return stride_[idx]; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + typename Stride::Index & stride(int idx) { + return stride_[idx]; + } + + /// Compute the number of contiguous elements needed to store a tensor with the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(MatrixCoord const &extent) const { + return (extent.column() + kInterleave - 1) / kInterleave * stride_[0]; + } +}; + +/// Enumerated type for canonical pitch-linear matrix layouts +enum class Matrix { + kColumnMajor, ///< leading dimension refers to stride between columns; stride along rows is 1 + kRowMajor ///< leading dimension refers to stride between rows; stride along columns is 1 +}; + +/// Mapping function for scenario in which layout is row-major or column-major but this information +/// is only available at runtime. +struct ContiguousMatrix { + + /// Logical rank of tensor + static int const kRank = 2; + + /// Rank of stride vector + static int const kStrideRank = 1; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = MatrixCoord; + + /// Stride vector + using Stride = Coord; + +private: + // + // Data members + // + + /// Stride data member + Stride stride_; + + /// Enumerated type indicating canonical matrix layout + Matrix layout_; + +public: + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + ContiguousMatrix( + Index ldm = 0, + Matrix layout = Matrix::kColumnMajor + ): + stride_(ldm), layout_(layout) { } + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static ContiguousMatrix packed( + MatrixCoord const &extent, + Matrix layout = Matrix::kColumnMajor) { + + Index ldm = 0; + if (layout == Matrix::kColumnMajor) { + ldm = extent.row(); + } + else if (layout == Matrix::kRowMajor) { + ldm = extent.column(); + } + return ContiguousMatrix(ldm, layout); + } + + /// Returns the offset of a coordinate in linear memory. 
+ /// Assumes coordinate has convention (row, column) + CUTLASS_HOST_DEVICE + LongIndex operator()(MatrixCoord const &coord) const { + if (layout_ == Matrix::kColumnMajor) { + return coord.row() + coord.column() * stride_[0]; + } + else if (layout_ == Matrix::kRowMajor) { + return coord.row() * stride_[0] + coord.column(); + } + else { + // degenerate case + return 0; + } + } + + /// Inverse of layout function, mapping linear offset to logical coordinate + CUTLASS_HOST_DEVICE + MatrixCoord inverse(LongIndex offset) const { + CUTLASS_UNUSED(offset); + return MatrixCoord(0, 0); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { + return stride_; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride & stride() { + return stride_; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + typename Stride::Index stride(int idx) const { + return stride_[idx]; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + typename Stride::Index & stride(int idx) { + return stride_[idx]; + } + + /// Compute the number of contiguous elements needed to store a tensor with the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(MatrixCoord const &extent) const { + if (layout_ == Matrix::kColumnMajor) { + return stride_[0] * extent.column(); + } + else if (layout_ == Matrix::kRowMajor) { + return stride_[0] * extent.row(); + } + else { + // degenerate case + return 0; + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Mapping function for scenario in which both rows and columns are separated by a stride. +template +struct AffineRankN { + + /// Logical rank of tensor + static int const kRank = Rank; + + /// Rank of stride vector + static int const kStrideRank = kRank; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = Coord; + + /// Stride vector + using Stride = Coord; + +private: + // + // Data members + // + + /// Stride data member + Stride stride_; + +public: + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + AffineRankN( + Stride const &stride = Stride() + ): + stride_(stride) { } + + /// Ctor + CUTLASS_HOST_DEVICE + AffineRankN( + Coord const &stride_m, + Coord const &stride_n + ) { + + // Concatenate the strides + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < kRank/2; ++m) { + stride_[m] = stride_m[m]; + } + + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < kRank/2; ++n) { + stride_[n + kRank/2] = stride_n[n]; + } + } + + /// Ctor for N = 2 + CUTLASS_HOST_DEVICE + AffineRankN( + LongIndex const &stride_m, + LongIndex const &stride_n + ) { + stride_[0] = stride_m; + stride_[1] = stride_n; + } + + /// Ctor for N = 2 + CUTLASS_HOST_DEVICE + AffineRankN( + LongIndex const &stride + ) { + stride_[0] = stride; + stride_[1] = 1; + } + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static AffineRankN packed(TensorCoord const &extent) { + + AffineRankN layout; + layout.stride_[kRank - 1] = 1; + + CUTLASS_PRAGMA_UNROLL + for (int i = kRank - 1; i > 0; --i) { + layout.stride_[i - 1] = layout.stride_[i] * extent[i]; + } + + return layout; + } + + /// Returns the offset of a coordinate in linear memory. 
+ /// Assumes coordinate has convention (row, column) + CUTLASS_HOST_DEVICE + LongIndex operator()(TensorCoord const &coord) const { + return dot(coord, stride_); + } + + /// Inverse of layout function, mapping linear offset to logical coordinate + CUTLASS_HOST_DEVICE + TensorCoord inverse(LongIndex offset) const { + return TensorCoord(); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { + return stride_; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride & stride() { + return stride_; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + typename Stride::Index stride(int idx) const { + return stride_[idx]; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + typename Stride::Index & stride(int idx) { + return stride_[idx]; + } + + /// Compute the number of contiguous elements needed to store a tensor with the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(TensorCoord const &extent) const { + int idx = stride_.max_dim_index(); + return extent[idx] * stride_[idx]; + } +}; + +/// Mapping function for scenario in which both rows and columns are separated by a stride. +/// Row stride is smaller than column stride in AffineRank2ColumnMajor. +struct AffineRank2ColumnMajor { + + /// Logical rank of tensor + static int const kRank = 2; + + /// Rank of stride vector + static int const kStrideRank = 2; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = MatrixCoord; + + /// Stride vector + using Stride = Coord; + +private: + // + // Data members + // + + /// Stride data member + Stride stride_; + +public: + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + AffineRank2ColumnMajor( + Stride const &stride = Stride() + ): + stride_(stride) { } + + /// Ctor + CUTLASS_HOST_DEVICE + AffineRank2ColumnMajor( + LongIndex row_stride, ///< stride between elements in consecutive rows + LongIndex column_stride ///< stride between elements in consecutive columns + ) + { stride_[0] = row_stride; stride_[1] = column_stride;} + + /// Ctor + CUTLASS_HOST_DEVICE + AffineRank2ColumnMajor( + LongIndex stride + ) + { stride_[0] = 1; stride_[1] = stride;} + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static AffineRank2ColumnMajor packed(MatrixCoord const &extent) { + return AffineRank2ColumnMajor(1, extent.row()); + } + + /// Returns the offset of a coordinate in linear memory. 
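// Editor's note (illustrative, not part of the CUTLASS source): affine layouts keep an
// explicit stride per dimension and operator() is simply dot(coord, stride). For example,
// AffineRankN<2>::packed(make_Coord(3, 5)) computes strides right-to-left, giving {5, 1},
// so coordinate (2, 3) maps to 2 * 5 + 3 * 1 = 13, while AffineRank2ColumnMajor::packed
// instead produces strides {1, extent.row()}.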
+ /// Assumes coordinate has convention (row, column) + CUTLASS_HOST_DEVICE + LongIndex operator()(MatrixCoord const &coord) const { + return dot(coord, stride_); + } + + /// Inverse of layout function, mapping linear offset to logical coordinate + CUTLASS_HOST_DEVICE + MatrixCoord inverse(LongIndex offset) const { + CUTLASS_UNUSED(offset); + return MatrixCoord(0, 0); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { + return stride_; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride & stride() { + return stride_; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + typename Stride::Index stride(int idx) const { + return stride_[idx]; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + typename Stride::Index & stride(int idx) { + return stride_[idx]; + } + + /// Compute the number of contiguous elements needed to store a tensor with the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(MatrixCoord const &extent) const { + return extent.column() * stride_[1]; + } +}; + +/// Mapping function for scenario in which both rows and columns are separated by a stride. +/// Column stride is smaller than row stride in AffineRank2RowMajor. +struct AffineRank2RowMajor { + + /// Logical rank of tensor + static int const kRank = 2; + + /// Rank of stride vector + static int const kStrideRank = 2; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = MatrixCoord; + + /// Stride vector + using Stride = Coord; + +private: + // + // Data members + // + + /// Stride data member + Stride stride_; + +public: + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + AffineRank2RowMajor( + Stride const &stride = Stride() + ): + stride_(stride) { } + + /// Ctor + CUTLASS_HOST_DEVICE + AffineRank2RowMajor( + LongIndex row_stride, ///< stride between elements in consecutive rows + LongIndex column_stride ///< stride between elements in consecutive columns + ) { stride_[0] = row_stride; stride_[1] = column_stride;} + + /// Ctor + CUTLASS_HOST_DEVICE + AffineRank2RowMajor( + LongIndex stride + ) { stride_[0] = stride; stride_[1] = 1;} + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static AffineRank2RowMajor packed(MatrixCoord const &extent) { + return AffineRank2RowMajor(extent.column(), 1); + } + + /// Returns the offset of a coordinate in linear memory.
+ /// Assumes coordinate has convention (row, column) + CUTLASS_HOST_DEVICE + LongIndex operator()(MatrixCoord const &coord) const { + return dot(coord, stride_); + } + + /// Inverse of layout function, mapping linear offset to logical coordinate + CUTLASS_HOST_DEVICE + MatrixCoord inverse(LongIndex offset) const { + CUTLASS_UNUSED(offset); + return MatrixCoord(0, 0); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { + return stride_; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride & stride() { + return stride_; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + typename Stride::Index stride(int idx) const { + return stride_[idx]; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + typename Stride::Index & stride(int idx) { + return stride_[idx]; + } + + /// Compute the number of contiguous elements needed to store a tensor with the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(MatrixCoord const &extent) const { + return extent.row() * stride_[0]; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +// Utility functions to convert stride_factor to the strides used by the Affine2 layout. +// +// stride_factor is the logical distance between two coorinates. +// +// All Coodinates used here are matrix coordinates. stride[0] and extent[0] are for the +// rows. stride[1] and extent[1] are for the columns. +template + struct Affine2Layout_Factory { + CUTLASS_HOST_DEVICE + static Affine2Layout layout_factory(cutlass::Coord<2> const &extent, typename Affine2Layout::Stride stride_factor) { + return Affine2Layout::packed(extent); + } +}; + +template <> +struct Affine2Layout_Factory { +CUTLASS_HOST_DEVICE +static cutlass::layout::AffineRank2ColumnMajor layout_factory( + cutlass::Coord<2> const &extent, + typename cutlass::layout::AffineRank2ColumnMajor::Stride stride_factor) { + return cutlass::layout::AffineRank2ColumnMajor({ stride_factor[0], stride_factor[0] * stride_factor[1] * extent[0] }); + } +}; + +template <> +struct Affine2Layout_Factory { +CUTLASS_HOST_DEVICE +static cutlass::layout::AffineRank2RowMajor layout_factory( + cutlass::Coord<2> const &extent, + typename cutlass::layout::AffineRank2RowMajor::Stride stride_factor) { + return cutlass::layout::AffineRank2RowMajor({ stride_factor[0] * stride_factor[1] * extent[1], stride_factor[1] }); + } +}; + +// The base layout cutlass::layout::AffineRankN<2> is similar to AffineRank2ColumnMajor +template <> +struct Affine2Layout_Factory> { +CUTLASS_HOST_DEVICE +static cutlass::layout::AffineRankN<2> layout_factory( + cutlass::Coord<2> const &extent, + typename cutlass::layout::AffineRankN<2>::Stride stride_factor) { + return cutlass::layout::AffineRankN<2>({ stride_factor[0], stride_factor[0] * stride_factor[1] * extent[0] }); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Mapping function for block-linear matrices. Matrix is structured +/// as column-major arrangement of 2D tiles (that are column-major). 
+template +struct ColumnMajorBlockLinear { + /// Logical rank of tensor + static int const kRank = 2; + + /// Rank of stride vector + static int const kStrideRank = 1; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = MatrixCoord; + + /// Stride vector + using Stride = Coord; + + /// Size of a block in rows + static int const kBlockRows = BlockRows; + + /// Size of a block in columns + static int const kBlockColumns = BlockColumns; + +private: + // + // Data members + // + + /// Stride data member + Stride stride_; + +public: + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + ColumnMajorBlockLinear(Index ldm = 0): stride_(ldm) { } + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static ColumnMajorBlockLinear packed(MatrixCoord const &extent) { + return ColumnMajorBlockLinear(extent.row() * kBlockRows * kBlockColumns); + } + + /// Returns the offset of a coordinate in linear memory. + /// Assumes coordinate has convention (row, column) + CUTLASS_HOST_DEVICE + LongIndex operator()(MatrixCoord const &coord) const { + return + (coord.row() % kBlockRows) + + (coord.column() % kBlockColumns) * kBlockRows + + (coord.row() / kBlockRows) * kBlockRows * kBlockColumns + + (coord.column() / kBlockColumns) * stride_[0]; + } + + /// Inverse of layout function, mapping linear offset to logical coordinate + CUTLASS_HOST_DEVICE + MatrixCoord inverse(LongIndex offset) const { + + return MatrixCoord(0, 0); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { + return stride_; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride & stride() { + return stride_; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + typename Stride::Index stride(int idx) const { + return stride_[idx]; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + typename Stride::Index & stride(int idx) { + return stride_[idx]; + } + + /// Compute the number of contiguous elements needed to store a tensor with the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(MatrixCoord const &extent) const { + return (extent.column() + kBlockColumns - 1) / kBlockColumns * stride_[0]; + } +}; + +/// Mapping function for block-linear matrices. Matrix is structured +/// as row-major arrangement of 2D tiles (that are row-major) +template +struct RowMajorBlockLinear { + /// Logical rank of tensor + static int const kRank = 2; + + /// Rank of stride vector + static int const kStrideRank = 1; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = MatrixCoord; + + /// Stride vector + using Stride = Coord; + + /// Size of a block in rows + static int const kBlockRows = BlockRows; + + /// Size of a block in columns + static int const kBlockColumns = BlockColumns; + +private: + // + // Data members + // + + /// Stride data member + Stride stride_; + +public: + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + RowMajorBlockLinear(Index ldm = 0): stride_(ldm) { } + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static RowMajorBlockLinear packed(MatrixCoord const &extent) { + return RowMajorBlockLinear(extent.column() * kBlockRows * kBlockColumns); + } + + /// Returns the offset of a coordinate in linear memory. 
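// Editor's note (illustrative, not part of the CUTLASS source): with BlockRows = 2 and
// BlockColumns = 2, RowMajorBlockLinear stores 2x2 tiles, row-major inside each tile and
// row-major across tiles. Given a leading stride S between groups of two rows, coordinate
// (row = 3, column = 2) maps to
//   (2 % 2) + (3 % 2) * 2 + (2 / 2) * 4 + (3 / 2) * S = 0 + 2 + 4 + S.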
+ /// Assumes coordinate has convention (row, column) + CUTLASS_HOST_DEVICE + LongIndex operator()(MatrixCoord const &coord) const { + return + (coord.column() % kBlockColumns) + + (coord.row() % kBlockRows) * kBlockColumns + + (coord.column() / kBlockColumns) * kBlockRows * kBlockColumns + + (coord.row() / kBlockRows) * stride_[0]; + } + + /// Inverse of layout function, mapping linear offset to logical coordinate + CUTLASS_HOST_DEVICE + MatrixCoord inverse(LongIndex offset) const { + return MatrixCoord(0, 0); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { + return stride_; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride & stride() { + return stride_; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + typename Stride::Index stride(int idx) const { + return stride_[idx]; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + typename Stride::Index & stride(int idx) { + return stride_[idx]; + } + + /// Compute the number of contiguous elements needed to store a tensor with the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(MatrixCoord const &extent) const { + return (extent.row() + kBlockRows - 1) / kBlockRows * stride_[0]; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +struct GeneralMatrix { + + /// Logical rank of tensor + static int const kRank = 2; + + /// Rank of stride vector + static int const kStrideRank = 2; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = MatrixCoord; + + /// Stride vector + using Stride = Coord; + +private: + // + // Data members + // + + Matrix layout_id_; + + /// Stride data member + Stride stride_; + +public: + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + GeneralMatrix(): layout_id_(Matrix::kColumnMajor), stride_(make_Coord(0, 1)) { } + + /// Ctor + CUTLASS_HOST_DEVICE + GeneralMatrix( + Matrix layout_id, + Index ldm, + Index interleave): layout_id_(layout_id), stride_(make_Coord(ldm, interleave)) { } + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static GeneralMatrix packed( + MatrixCoord const &extent, + Matrix layout_id = Matrix::kColumnMajor, + Index interleave = 1) { + + Index c; + if (layout_id == Matrix::kRowMajor) { + c = extent.column(); + } + else { + c = extent.row(); + } + + Index ldm = c * interleave; + + return GeneralMatrix(layout_id, ldm, interleave); + } + + /// Returns the offset of a coordinate in linear memory. 
+ /// Assumes coordinate has convention (row, column) + CUTLASS_HOST_DEVICE + LongIndex operator()(MatrixCoord const &coord) const { + Index c, s; + if (layout_id_ == Matrix::kRowMajor) { + c = coord.column(); + s = coord.row(); + } + else { + s = coord.column(); + c = coord.row(); + } + + Index v = s / stride_[1]; + Index residual = (s % stride_[1]); + + return LongIndex(c) * LongIndex(stride_[1]) + LongIndex(v) * LongIndex(stride_[0]) + residual; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { + return stride_; + } + + CUTLASS_HOST_DEVICE + Matrix layout_id() const { + return layout_id_; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride & stride() { + return stride_; + } + + CUTLASS_HOST_DEVICE + Matrix & layout_id() { + return layout_id_; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + typename Stride::Index stride(int idx) const { + return stride_[idx]; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + typename Stride::Index & stride(int idx) { + return stride_[idx]; + } + + /// Compute the number of contiguous elements needed to store a tensor with the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(MatrixCoord const &extent) const { + Index s; + if (layout_id_ == Matrix::kRowMajor) { + s = extent.row(); + } + else { + s = extent.column(); + } + + Index v = Index((s + stride_[1] - 1) / stride_[1]); + return LongIndex(v) * LongIndex(stride_[0]); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Defines transposes of matrix layouts +template +struct LayoutTranspose; + +/// Transpose of row-major is column-major +template <> +struct LayoutTranspose { + using type = layout::ColumnMajor; +}; + +/// Transpose of column-major is row-major +template <> +struct LayoutTranspose { + using type = layout::RowMajor; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace layout +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/layout/permute.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/layout/permute.h new file mode 100644 index 0000000000000000000000000000000000000000..8e1f4ceeaa5d7dfc06eaf9fb1cf7fda2ec87d239 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/layout/permute.h @@ -0,0 +1,828 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Defines layout functions used by GEMM+permute path for common tensor or matrix formats. + + Like Layout functions, permute layout functions map logical coordinates to linear memory. They often require additional + data to describe strides between elements. + + Permute layout functions must implement all members in the interface of NoPermute<> defined in this file. Address offset + computation lies in operator() with private member variables {col_permute_, row_permute_ and stride_} as new addresses after permute op. +*/ +#pragma once +#if defined(__CUDACC_RTC__) +#include +#else +#include "assert.h" +#endif +#include "cutlass/cutlass.h" +#include "cutlass/fast_math.h" +#include "cutlass/layout/pitch_linear.h" +#include "cutlass/layout/matrix.h" +#include "cutlass/coord.h" +#include "cutlass/tensor_coord.h" + +namespace cutlass { +namespace layout { + +// template +// struct PermuteSelect { +// // Try to give a reasonable error message to the user +// static_assert(!platform::is_same::value, // aka always_false +// "You've tried to use a layout permutation for which the implementation is not availble. 
" +// "In order to provide an implementation for a particular combination of matrix layout " +// "and direction (direct/inverse), please specialize PermuteSelect trait."); +// }; + +// Base template for defining specializations of permutation inverses +template +struct InversePermute +{ + // Try to give a reasonable error message to the user + static_assert(!platform::is_same::value, // aka always_false + "To apply permutation to a GEMM input operand (A or B), an inverse permutation for the desired " + "permute class must be defined and enabled by specializing cutlass::layout::InversePermute trait."); +}; + +class PermuteBase { +public: + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; +}; + +class NoPermute : public PermuteBase { +public: + // + // Methods + // + + /// Constructor from matrix extent + CUTLASS_HOST_DEVICE + NoPermute(MatrixCoord extent, Index stride) { }; + + /// Constructor from pitch-linear extent + CUTLASS_HOST_DEVICE + NoPermute(PitchLinearCoord extent, Index stride) { }; + + /// Computes the offset after Permute Op in logical elements + CUTLASS_HOST_DEVICE + LongIndex operator()(MatrixCoord coord) const { return 0; } // not correct but should never be called + + /// Computes the offset after Permute Op in logical elements + CUTLASS_HOST_DEVICE + LongIndex operator()(PitchLinearCoord coord) const { return 0; } // not correct but should never be called +}; + +template<> +struct InversePermute { + using type = NoPermute; +}; + +/// Helper trait to detect if permute operation is a noop +template +inline bool constexpr is_trivial_permute = platform::is_same::value; + +///////////////////////////////////////////////////////////////////////////////////////////////// +// +// Defines permute layouts of various tensor formats. +// +///////////////////////////////////////////////////////////////////////////////////////////////// + +///////////////////////////////////////////////////////////////////////////////////////////////// +// Tensor4DPermute0213 +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Permute layout function for 4-D permuted tensors with matrix (dimensions [M, N]) reshaped +/// as [M/D1, D1, D2, N/D2]. Then perform permute([0, 2, 1, 3]) on the corresponding tensor. 
+template +class Tensor4DPermute0213RowMajor : public PermuteBase { +private: + // + // Data members + // + + Index D3_; + + Index stride_; + +public: + // + // Methods + // + + /// Constructor + CUTLASS_HOST_DEVICE + Tensor4DPermute0213RowMajor(MatrixCoord extent, Index stride) { + + assert(extent.row() % D1 == 0); + assert(extent.column() % D2 == 0); + + D3_ = extent.column() / D2; + + stride_ = stride * D1 / D2; + } + + /// Constructor + CUTLASS_HOST_DEVICE + Tensor4DPermute0213RowMajor(PitchLinearCoord extent, Index stride) + : Tensor4DPermute0213RowMajor(MatrixCoord(extent.strided(), extent.contiguous()), stride) {} + + /// Computes the offset after Permute Op in logical elements + CUTLASS_HOST_DEVICE + LongIndex operator()(MatrixCoord coord) const { + + // [i,j,k,l] -> [i,k,j,l] + Index l = coord.column() % D3_; + Index k = coord.column() / D3_; + Index j = coord.row() % D1; + Index i = coord.row() / D1; + + MatrixCoord permuted{k + i * D2, l + j * D3_}; + + return LongIndex(permuted.row()) * LongIndex(stride_) + LongIndex(permuted.column()); + } + + /// Computes the offset after Permute Op in logical elements + CUTLASS_HOST_DEVICE + LongIndex operator()(PitchLinearCoord coord) const { + return operator()(MatrixCoord(coord.strided(), coord.contiguous())); + } +}; + +// Inverse for Tensor4DPermute0213 can be implemented by simply swapping D1 and D2 +template +class Tensor4DPermute0213RowMajorInverse : public Tensor4DPermute0213RowMajor { +public: + using Base = Tensor4DPermute0213RowMajor; + using Base::Base; +}; + +template +struct InversePermute> { + using type = Tensor4DPermute0213RowMajorInverse; +}; + +template +struct InversePermute> { + using type = Tensor4DPermute0213RowMajor; +}; + +/// Permute layout function for 4-D permuted tensors with matrix (dimensions [M, N]) reshaped +/// as [M/D1, D1, D2, N/D2]. Then perform permute([0, 2, 1, 3]) on the corresponding tensor. 
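// Editor's note: an illustrative host-side sketch (not part of the CUTLASS source) of the
// row-major 0213 permute defined above, assuming its template parameters are <int D1, int D2>.
#include "cutlass/layout/permute.h"
#include <cassert>

inline void permute_0213_row_major_example() {
  // View a 4x4 row-major matrix (leading stride 4) as a [2, 2, 2, 2] tensor and apply
  // permute([0, 2, 1, 3]).
  using Permute = cutlass::layout::Tensor4DPermute0213RowMajor<2, 2>;
  Permute permute(cutlass::MatrixCoord(4, 4), /*stride=*/4);
  // Logical (row = 1, column = 0) is [i, j, k, l] = [0, 1, 0, 0]; after the permute it sits
  // at (row = k + i * D2, column = l + j * D3) = (0, 2), i.e. linear offset 2.
  assert(permute(cutlass::MatrixCoord(1, 0)) == 2);
}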
+template +class Tensor4DPermute0213ColumnMajor : public PermuteBase { +private: + // + // Data members + // + + Index D0_; + + Index stride_; + +public: + // + // Methods + // + + /// Constructor + CUTLASS_HOST_DEVICE + Tensor4DPermute0213ColumnMajor(MatrixCoord extent, Index stride) { + + assert(extent.row() % D1 == 0); + assert(extent.column() % D2 == 0); + + D0_ = extent.row() / D1; + + stride_ = stride * D2 / D1; + } + + /// Constructor + CUTLASS_HOST_DEVICE + Tensor4DPermute0213ColumnMajor(PitchLinearCoord extent, Index stride) + : Tensor4DPermute0213ColumnMajor(MatrixCoord(extent.contiguous(), extent.strided()), stride) {} + + /// Computes the offset after Permute Op in logical elements + CUTLASS_HOST_DEVICE + LongIndex operator()(MatrixCoord coord) const { + + // [i,j,k,l] -> [i,k,j,l] + Index l = coord.column() / D2; + Index k = coord.column() % D2; + Index j = coord.row() / D0_; + Index i = coord.row() % D0_; + + MatrixCoord permuted{i + k * D0_, j + l * D1}; + + return LongIndex(permuted.row()) + LongIndex(permuted.column()) * LongIndex(stride_); + } + + /// Computes the offset after Permute Op in logical elements + CUTLASS_HOST_DEVICE + LongIndex operator()(PitchLinearCoord coord) const { + return operator()(MatrixCoord(coord.contiguous(), coord.strided())); + } +}; + +// Inverse for Tensor4DPermute0213 can be implemented by simply swapping D1 and D2 +template +class Tensor4DPermute0213ColumnMajorInverse : public Tensor4DPermute0213ColumnMajor { +public: + using Base = Tensor4DPermute0213ColumnMajor; + using Base::Base; +}; + +template +struct InversePermute> { + using type = Tensor4DPermute0213ColumnMajorInverse; +}; + +template +struct InversePermute> { + using type = Tensor4DPermute0213ColumnMajor; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// +// Tensor4DPermuteBMM0213 +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Permute layout function for 4-D permuted tensors for BMM with BMM tensor (dimensions [B, M, N]) reshaped +/// as [B/D1, D1, M, N]. Then perform permute([0, 2, 1, 3]) on the corresponding whole BMM tensor. 
+template +class Tensor4DPermuteBMM0213RowMajor : public PermuteBase { +private: + // + // Data members + // + + Index D3_; + + Index stride_; + + Index batch_stride_; + +public: + // + // Methods + // + + /// Constructor + CUTLASS_HOST_DEVICE + Tensor4DPermuteBMM0213RowMajor(MatrixCoord extent, Index stride) { + + Index D2 = extent.row(); + D3_ = extent.column(); + + stride_ = stride * D1; + batch_stride_ = D2 * stride_; + } + + /// Constructor + CUTLASS_HOST_DEVICE + Tensor4DPermuteBMM0213RowMajor(PitchLinearCoord extent, Index stride) + : Tensor4DPermuteBMM0213RowMajor(MatrixCoord(extent.strided(), extent.contiguous()), stride) {} + + /// Computes the offset after Permute Op in logical elements + CUTLASS_HOST_DEVICE + LongIndex operator()(MatrixCoord coord) const { + + // The batch index for BMM + Index BMM_batch_idx = blockIdx.z; + + // [i,j,k,l] -> [i,k,j,l] + Index l = coord.column(); + Index k = coord.row(); + Index j = BMM_batch_idx % D1; + Index i = BMM_batch_idx / D1; + + Index pbatch = i; + MatrixCoord pcoord{k, l + j * D3_}; + + return pbatch * LongIndex(batch_stride_) + pcoord.row() * LongIndex(stride_) + pcoord.column(); + } + + /// Computes the offset after Permute Op in logical elements + CUTLASS_HOST_DEVICE + LongIndex operator()(PitchLinearCoord coord) const { + return operator()(MatrixCoord(coord.strided(), coord.contiguous())); + } +}; + +template +class Tensor4DPermuteBMM0213RowMajorInverse : public PermuteBase { +private: + // + // Data members + // + + Index D3_; + + Index stride_; + + Index batch_stride_; + +public: + // + // Methods + // + + /// Constructor + CUTLASS_HOST_DEVICE + Tensor4DPermuteBMM0213RowMajorInverse(MatrixCoord extent, Index stride) { + + assert(extent.column() % D1 == 0); + + Index D2 = extent.row(); + D3_ = extent.column() / D1; + + stride_ = stride / D1; + + batch_stride_ = D2 * stride_; + } + + /// Constructor + CUTLASS_HOST_DEVICE + Tensor4DPermuteBMM0213RowMajorInverse(PitchLinearCoord extent, Index stride) + : Tensor4DPermuteBMM0213RowMajorInverse(MatrixCoord(extent.strided(), extent.contiguous()), stride) {} + + /// Computes the offset after Permute Op in logical elements + CUTLASS_HOST_DEVICE + LongIndex operator()(MatrixCoord coord) const { + + // The batch index for BMM + Index BMM_batch_idx = blockIdx.z; + + // The following assumes grouping [(D0)->batch, (D2)->row, (D1,D3)->col] + Index l = coord.column() % D3_; + Index j = coord.column() / D3_; + Index k = coord.row(); + Index i = BMM_batch_idx; + + // compute original [batch, row, col] index + Index pbatch = j + i * D1; + MatrixCoord pcoord{k, l}; + + return pbatch * LongIndex(batch_stride_) + pcoord.row() * LongIndex(stride_) + pcoord.column(); + } + + /// Computes the offset after Permute Op in logical elements + CUTLASS_HOST_DEVICE + LongIndex operator()(PitchLinearCoord coord) const { + return operator()(MatrixCoord(coord.strided(), coord.contiguous())); + } +}; + +template +struct InversePermute> { + using type = Tensor4DPermuteBMM0213RowMajorInverse; +}; + +template +struct InversePermute> { + using type = Tensor4DPermuteBMM0213RowMajor; +}; + +/// Permute layout function for 4-D permuted tensors for BMM with BMM tensor (dimensions [B, M, N]) reshaped +/// as [B/D1, D1, M, N]. Then perform permute([0, 3, 2, 1]) on the corresponding whole BMM tensor. 
+template +class Tensor4DPermuteBMM0321ColumnMajor : public PermuteBase { +private: + // + // Data members + // + + Index D2_; + + Index stride_; + + Index batch_stride_; + +public: + // + // Methods + // + + /// Constructor + CUTLASS_HOST_DEVICE + Tensor4DPermuteBMM0321ColumnMajor(MatrixCoord extent, Index stride) { + + D2_ = extent.row(); + Index D3 = extent.column(); + + stride_ = stride * D1; + batch_stride_ = stride_ * D3; + } + + /// Constructor + CUTLASS_HOST_DEVICE + Tensor4DPermuteBMM0321ColumnMajor(PitchLinearCoord extent, Index stride) + : Tensor4DPermuteBMM0321ColumnMajor(MatrixCoord(extent.contiguous(), extent.strided()), stride) {} + + /// Computes the offset after Permute Op in logical elements + CUTLASS_HOST_DEVICE + LongIndex operator()(MatrixCoord coord) const { + + Index BMM_batch_idx = blockIdx.z; + + // [i,j,k,l] -> [i,k,j,l] + Index l = coord.column(); + Index k = coord.row(); + Index j = BMM_batch_idx % D1; + Index i = BMM_batch_idx / D1; + + Index pbatch = i; + MatrixCoord pcoord{k + j * D2_, l}; + + return pbatch * LongIndex(batch_stride_) + pcoord.row() + pcoord.column() * LongIndex(stride_); + } + + /// Computes the offset after Permute Op in logical elements + CUTLASS_HOST_DEVICE + LongIndex operator()(PitchLinearCoord coord) const { + return operator()(MatrixCoord(coord.contiguous(), coord.strided())); + } +}; + +template +class Tensor4DPermuteBMM0321ColumnMajorInverse : public PermuteBase { +private: + // + // Data members + // + + Index D2_; + + Index stride_; + + Index batch_stride_; + +public: + // + // Methods + // + + /// Constructor + CUTLASS_HOST_DEVICE + Tensor4DPermuteBMM0321ColumnMajorInverse(MatrixCoord extent, Index stride) { + + assert(extent.row() % D1 == 0); + + D2_ = extent.row() / D1; + Index D3 = extent.column(); + + stride_ = stride / D1; + batch_stride_ = stride_ * D3; + } + + /// Constructor + CUTLASS_HOST_DEVICE + Tensor4DPermuteBMM0321ColumnMajorInverse(PitchLinearCoord extent, Index stride) + : Tensor4DPermuteBMM0321ColumnMajorInverse(MatrixCoord(extent.contiguous(), extent.strided()), stride) {} + + /// Computes the offset after Permute Op in logical elements + CUTLASS_HOST_DEVICE + LongIndex operator()(MatrixCoord coord) const { + + Index BMM_batch_idx = blockIdx.z; + + // The following assumes grouping [(D0)->batch, (D1,D2)->row, (D3)->col] + Index l = coord.column(); + Index k = coord.row() % D2_; + Index j = coord.row() / D2_; + Index i = BMM_batch_idx; + + Index pbatch = i * D1 + j; + MatrixCoord pcoord{k, l}; + + return pbatch * LongIndex(batch_stride_) + pcoord.row() + pcoord.column() * LongIndex(stride_); + } + + /// Computes the offset after Permute Op in logical elements + CUTLASS_HOST_DEVICE + LongIndex operator()(PitchLinearCoord coord) const { + return operator()(MatrixCoord(coord.contiguous(), coord.strided())); + } +}; + +template +struct InversePermute> { + using type = Tensor4DPermuteBMM0321ColumnMajorInverse; +}; + +template +struct InversePermute> { + using type = Tensor4DPermuteBMM0321ColumnMajor; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// +// Tensor5DPermute20314 +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Permute layout function for 5-D permuted tensors with output matrix (dimension as [M, N]) reshaped +/// as [M/T1, T1, T2, T3, N/T2/T3]. Then perform permute([2, 0, 3, 1, 4]) on the corresponding output tensor. 
+template +class Tensor5DPermute20314RowMajor : public PermuteBase { +private: + // + // Data members + // + + Index T0_; + + Index T4_; + + Index stride_; + +public: + // + // Methods + // + + /// Constructor + CUTLASS_HOST_DEVICE + Tensor5DPermute20314RowMajor(MatrixCoord extent, Index stride) { + + assert(extent.row() % T1 == 0); + assert(extent.column() % (T2 * T3) == 0); + + T0_ = extent.row() / T1; + T4_ = extent.column() / (T2 * T3); + + /// Update stride_permute with stride + stride_ = stride / T2 * T1; // stride in Elements + } + + /// Constructor + CUTLASS_HOST_DEVICE + Tensor5DPermute20314RowMajor(PitchLinearCoord extent, Index stride) + : Tensor5DPermute20314RowMajor(MatrixCoord(extent.strided(), extent.contiguous()), stride) {} + + + /// Computes the offset after Permute Op in logical elements + CUTLASS_HOST_DEVICE + LongIndex operator()(MatrixCoord coord) const { + + // Permute as torch.permute(X1, [2, 0, 3, 1, 4]) -> 5D Tensor indices as [i,j,k,l,m], the dimension of X + // is [T0, T1, T2, T3, T4], after permutation the dim of X1 is [T2, T0, T3, T1, T4]. + + Index m = coord.column() % T4_; + Index l = (coord.column() / T4_) % T3; + Index k = (coord.column() / T4_) / T3; + Index j = coord.row() % T1; + Index i = coord.row() / T1; + + MatrixCoord permuted{i + k * T0_, m + j * T4_ + l * T1 * T4_}; + + return LongIndex(permuted.row()) * LongIndex(stride_) + LongIndex(permuted.column()); + } + + /// Computes the offset after Permute Op in logical elements + CUTLASS_HOST_DEVICE + LongIndex operator()(PitchLinearCoord coord) const { + return operator()(MatrixCoord(coord.strided(), coord.contiguous())); + } +}; + +/// Inverse for Tensor5DPermute20314 (could also be given a proper name, e.g. Tensor5DPermute13024). +template +class Tensor5DPermute20314RowMajorInverse : public PermuteBase { +private: + // + // Data members + // + + Index T0_; + + Index T4_; + + // Permuted stride in units of elements + Index stride_; + +public: + // + // Methods + // + + /// Constructor + CUTLASS_HOST_DEVICE + Tensor5DPermute20314RowMajorInverse(MatrixCoord extent, Index stride) { + + assert(extent.row() % T2 == 0); + assert(extent.column() % (T1 * T3) == 0); + + T0_ = extent.row() / T2; + T4_ = extent.column() / (T1 * T3); + + stride_ = stride / T1 * T2; + } + + /// Constructor + CUTLASS_HOST_DEVICE + Tensor5DPermute20314RowMajorInverse(PitchLinearCoord extent, Index stride) + : Tensor5DPermute20314RowMajorInverse(MatrixCoord(extent.strided(), extent.contiguous()), stride) {} + + /// Computes the offset after the inverse of permute operation in logical elements + CUTLASS_HOST_DEVICE + LongIndex operator()(MatrixCoord coord) const { + + Index m = coord.column() % T4_; + Index j = (coord.column() / T4_) % T1; + Index l = (coord.column() / T4_) / T1; + Index i = coord.row() % T0_; + Index k = coord.row() / T0_; + + MatrixCoord permuted{j + i * T1, m + l * T4_ + k * T3 * T4_}; + + return LongIndex(permuted.row()) * LongIndex(stride_) + LongIndex(permuted.column()); + } + + /// Computes the offset after Permute Op in logical elements + CUTLASS_HOST_DEVICE + LongIndex operator()(PitchLinearCoord coord) const { + return operator()(MatrixCoord(coord.strided(), coord.contiguous())); + } +}; + +template +struct InversePermute> { + using type = Tensor5DPermute20314RowMajorInverse; +}; + +template +struct InversePermute> { + using type = Tensor5DPermute20314RowMajor; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// +// Tensor5DPermute02413 
+///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Permute layout function for 5-D permuted tensors with matrix (dimensions [M, N]) reshaped +/// as [M/T1, T1, T2, T3, N/T2/T3]. Then perform permute([0, 2, 4, 1, 3]) on the corresponding tensor. +template +class Tensor5DPermute02413ColumnMajor : public PermuteBase { +private: + // + // Data members + // + + Index T0_; + + Index T4_; + + Index stride_; + +public: + // + // Methods + // + + /// Constructor + CUTLASS_HOST_DEVICE + Tensor5DPermute02413ColumnMajor(MatrixCoord extent, Index stride) { + + assert(extent.row() % T1 == 0); + assert(extent.column() % (T2 * T3) == 0); + + T0_ = extent.row() / T1; + T4_ = extent.column() / (T2 * T3); + + /// Update stride_permute with stride + stride_ = stride / T1 * T2; // stride in Elements + } + + /// Constructor + CUTLASS_HOST_DEVICE + Tensor5DPermute02413ColumnMajor(PitchLinearCoord extent, Index stride) + : Tensor5DPermute02413ColumnMajor(MatrixCoord(extent.contiguous(), extent.strided()), stride) {} + + /// Computes the offset after Permute Op in logical elements + CUTLASS_HOST_DEVICE + LongIndex operator()(MatrixCoord coord) const { + + // Permute as torch.permute(X1, [2, 0, 3, 1, 4]) -> 5D Tensor indices as [i,j,k,l,m], the dimension of X + // is [T0, T1, T2, T3, T4], after permutation the dim of X1 is [T0, T2, T4, T1, T3]. + + Index m = (coord.column() / T2) / T3; + Index l = (coord.column() / T2) % T3; + Index k = coord.column() % T2; + Index j = coord.row() / T0_; + Index i = coord.row() % T0_; + + MatrixCoord permuted{i + k * T0_, m + j * T4_ + l * T4_ * T1}; + + return LongIndex(permuted.row()) + LongIndex(permuted.column()) * LongIndex(stride_); + } + + /// Computes the offset after Permute Op in logical elements + CUTLASS_HOST_DEVICE + LongIndex operator()(PitchLinearCoord coord) const { + return operator()(MatrixCoord(coord.contiguous(), coord.strided())); + } +}; + +/// Inverse for Tensor5DPermute02413ColumnMajor +template +class Tensor5DPermute02413ColumnMajorInverse : public PermuteBase { +private: + // + // Data members + // + + Index T0_; + + Index T4_; + + // Permuted stride in units of elements + Index stride_; + +public: + // + // Methods + // + + /// Constructor + CUTLASS_HOST_DEVICE + Tensor5DPermute02413ColumnMajorInverse(MatrixCoord extent, Index stride) { + + assert(extent.row() % T2 == 0); + assert(extent.column() % (T1 * T3) == 0); + + T0_ = extent.row() / T2; + T4_ = extent.column() / (T1 * T3); + + stride_ = stride / T2 * T1; + } + + /// Constructor + CUTLASS_HOST_DEVICE + Tensor5DPermute02413ColumnMajorInverse(PitchLinearCoord extent, Index stride) + : Tensor5DPermute02413ColumnMajorInverse(MatrixCoord(extent.contiguous(), extent.strided()), stride) {} + + /// Computes the offset after the inverse of permute operation in logical elements + CUTLASS_HOST_DEVICE + LongIndex operator()(MatrixCoord coord) const { + + Index m = coord.column() % T4_; + Index j = (coord.column() / T4_) % T1; + Index l = (coord.column() / T4_) / T1; + Index i = coord.row() % T0_; + Index k = coord.row() / T0_; + + MatrixCoord permuted{i + j * T0_, k + l * T2 + m * T2 * T3}; + + return LongIndex(permuted.row()) + LongIndex(permuted.column()) * LongIndex(stride_); + } + + /// Computes the offset after Permute Op in logical elements + CUTLASS_HOST_DEVICE + LongIndex operator()(PitchLinearCoord coord) const { + return operator()(MatrixCoord(coord.contiguous(), coord.strided())); + } +}; + +template +struct InversePermute> { + using type = 
Tensor5DPermute02413ColumnMajorInverse; +}; + +template +struct InversePermute> { + using type = Tensor5DPermute02413ColumnMajor; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace layout +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/layout/pitch_linear.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/layout/pitch_linear.h new file mode 100644 index 0000000000000000000000000000000000000000..eefccf8f905ebd6734dbd5b26d94d6f4e0ea314e --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/layout/pitch_linear.h @@ -0,0 +1,159 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Defines layout functions used by TensorRef and derived classes for pitch-linear memory. +*/ + +/* + Note: CUTLASS 3x increases the host compiler requirements to C++17. However, certain + existing integrations of CUTLASS require C++11 host compilers. + + Until this requirement can be lifted, certain headers with this annotation are required + to be remain consistent with C++11 syntax. + + C++11 compatibility is enforced by this unit test: `cutlass_test_unit_core_cpp11`. 
+*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/coord.h" +#include "cutlass/pitch_linear_coord.h" + +namespace cutlass { +namespace layout { + +template + using PitchLinearShape = cutlass::PitchLinearShape < Contiguous, Strided >; + using PitchLinearCoord = PitchLinearCoord; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Mapping function for pitch-linear memory +class PitchLinear { +public: + /// Logical rank of tensor + static int const kRank = 2; + + /// Rank of stride vector + static int const kStrideRank = 1; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = PitchLinearCoord; + + /// Stride vector + using Stride = Coord; + +private: + // + // Data members + // + + /// Stride data member + Stride stride_; + +public: + // + // Methods + // + + /// Constructor + CUTLASS_HOST_DEVICE + PitchLinear(LongIndex ldm = 0): stride_(ldm) { } + + /// Constructor + CUTLASS_HOST_DEVICE + PitchLinear(Stride _stride): stride_(_stride) { } + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static PitchLinear packed(TensorCoord const &extent) { + return PitchLinear(extent.contiguous()); + } + + /// Returns the offset of a coordinate in linear memory. + /// Assumes coordinate has convention (contiguous, strided) + CUTLASS_HOST_DEVICE + LongIndex operator()(TensorCoord const &coord) const { + return LongIndex(coord.contiguous()) + LongIndex(coord.strided()) * LongIndex(stride_[0]); + } + + /// Returns the logical coordinate given an offset. + CUTLASS_HOST_DEVICE + TensorCoord inverse(LongIndex index) const { + return make_Coord( + TensorCoord::Index(index % stride_[0]), + TensorCoord::Index(index / stride_[0]) + ); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { + return stride_; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride & stride() { + return stride_; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + LongIndex stride(int rank) const { + return stride_[rank]; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + LongIndex & stride(int rank) { + return stride_[rank]; + } + + /// Compute the number of contiguous elements needed to store a tensor with the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(TensorCoord const &extent) const { + return extent.strided() * stride_[0]; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace layout +} // namespace cutlass + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/layout/tensor.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/layout/tensor.h new file mode 100644 index 0000000000000000000000000000000000000000..0f10e865fd54c4ae548a75c6bd663abb376c418e --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/layout/tensor.h @@ -0,0 +1,639 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Defines layout functions used by TensorRef and derived classes for common 4-D and 5-D + tensor formats. + + Layout functions map logical coordinates to linear memory. They often require additional + data to describe strides between elements. + + Layout functions must implement all members in the public interface of IdentityTensorLayout<> + defined in cutlass/tensor_ref.h. +*/ +#pragma once +#if defined(__CUDACC_RTC__) +#include +#else +#include "assert.h" +#endif +#include "cutlass/cutlass.h" +#include "cutlass/fast_math.h" +#include "cutlass/layout/pitch_linear.h" +#include "cutlass/layout/matrix.h" +#include "cutlass/coord.h" +#include "cutlass/tensor_coord.h" + +namespace cutlass { +namespace layout { + +///////////////////////////////////////////////////////////////////////////////////////////////// +// +// Defines data layouts of various tensor formats usable by TensorRef and other classes. +// +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Tag used for 3-D NWC tensors for 1D conv, only used in 3.x API +class TensorNWC {}; + +/// Mapping function for 4-D NHWC tensors. 
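///
/// Illustrative usage (a minimal sketch; extents chosen arbitrarily): for a packed NHWC
/// tensor of extent (N, H, W, C), the stride vector is [C, W*C, H*W*C], so the offset of
/// coordinate (n, h, w, c) is c + w*C + h*W*C + n*H*W*C.
///
///   cutlass::layout::TensorNHWC layout =
///       cutlass::layout::TensorNHWC::packed(cutlass::Tensor4DCoord(2, 4, 4, 8));
///   int64_t offset = layout(cutlass::Tensor4DCoord(1, 2, 3, 5));
///   // offset == 5 + 3*8 + 2*32 + 1*128 == 221
///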
+class TensorNHWC { +public: + /// Logical rank of tensor + static int const kRank = 4; + + /// Rank of stride vector + static int const kStrideRank = 3; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate (n, h, w, c) + using TensorCoord = Tensor4DCoord; + + /// Stride vector + using Stride = Coord; + +private: + // + // Data members + // + + /// Stride data member - [stride_w, stride_h, stride_n] + Stride stride_; + +public: + // + // Methods + // + + /// Constructor + CUTLASS_HOST_DEVICE + TensorNHWC(Stride const &stride = Stride(0)): stride_(stride) { } + + /// Constructor + CUTLASS_HOST_DEVICE + TensorNHWC( + typename Stride::Index stride_w, ///< number of elements between adjacent W coordinates + typename Stride::Index stride_h, ///< number of elements between adjacent H coordinates + typename Stride::Index stride_n ///< number of elements between adjacent N coordinates + ): + stride_(make_Coord(stride_w, stride_h, stride_n)) { } + + /// Constructor + // Once convolutions implement 64b stride this ctor can be deleted + CUTLASS_HOST_DEVICE + TensorNHWC(Coord const &stride): + stride_(make_Coord( + static_cast(stride[0]), + static_cast(stride[1]), + static_cast(stride[2])) + ) { } + + /// Helper returns a layout to a tightly packed NHWC tensor. + CUTLASS_HOST_DEVICE + static TensorNHWC packed(TensorCoord const &extent) { + return TensorNHWC( + make_Coord( + extent.c(), + extent.w() * extent.c(), + extent.h() * extent.w() * extent.c() + ) + ); + } + + /// Returns the offset of a coordinate (n, h, w, c) in linear memory. + CUTLASS_HOST_DEVICE + LongIndex operator()(TensorCoord const &coord) const { + return coord.c() + + LongIndex(stride_[0] * coord.w()) + + LongIndex(stride_[1] * coord.h()) + + LongIndex(stride_[2] * coord.n()); + } + + /// Returns the offset of a pitchlinear coordinate in linear memory. + CUTLASS_HOST_DEVICE + LongIndex operator()(PitchLinearCoord coord) const { + return coord.contiguous() + LongIndex(coord.strided() * stride_[2]); + } + + /// Returns the logical coordinate (n, h, w, c) from a given offset in linear memory. 
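  ///
  /// Conceptually this is three successive div/mod steps against the strides [c, wc, hwc]
  /// (a host-side sketch of the same arithmetic as the non-CUDA branch below):
  ///
  ///   n = index / stride_[2];   residual = index % stride_[2];
  ///   h = residual / stride_[1];  residual = residual % stride_[1];
  ///   w = residual / stride_[0];  c = residual % stride_[0];
  ///
  /// The device branch performs the same divisions with find_divisor/fast_divmod to avoid
  /// slow integer division.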
+ CUTLASS_HOST_DEVICE + TensorCoord inverse(LongIndex index) const { + + int n = 0, h = 0, w = 0, c = 0; + + #if defined(__CUDA_ARCH__) + int tmp = 0; + c = int(index % static_cast(stride_[0])); + + unsigned int hw_mul, hw_shr, w_mul, w_shr, c_mul, c_shr; + + find_divisor(hw_mul, hw_shr, stride_[2]); + find_divisor(w_mul, w_shr, stride_[1]); + find_divisor(c_mul, c_shr, stride_[0]); + + fast_divmod(n, tmp, index, int(stride_[2]), hw_mul, hw_shr); + fast_divmod(h, w, tmp, int(stride_[1]), w_mul, w_shr); + fast_divmod(w, tmp, w, int(stride_[0]), c_mul, c_shr); + #else + + n = int(index / stride_[2]); + LongIndex residual = index % stride_[2]; + + h = int(residual / stride_[1]); + residual = (residual % stride_[1]); + + w = int(residual / stride_[0]); + c = int(residual % stride_[0]); + + #endif + return TensorCoord(n, h, w, c); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { + return stride_; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride & stride() { + return stride_; + } + + /// Compute the number of contiguous elements needed to store a tensor with the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(TensorCoord const &extent) const { + // it does not make sense if the extent is larger than stride + // and we could not rely on the capacity calculation in such cases + // we could move this checkers to debug code only + if ((extent.c() > stride_[0]) + || (extent.w() * stride_[0] > stride_[1]) + || (extent.h() * stride_[1] > stride_[2])) { + assert(0); + } + return extent.n() * stride_[2]; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Mapping function for 4-D NCHW tensors. +class TensorNCHW { +public: + /// Logical rank of tensor + static int const kRank = 4; + + /// Rank of stride vector + static int const kStrideRank = 3; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = Tensor4DCoord; + + /// Stride vector + using Stride = Coord; + +private: + // + // Data members + // + + /// Stride data member - [w, hw, chw] + Stride stride_; + +public: + // + // Methods + // + + /// Constructor + CUTLASS_HOST_DEVICE + TensorNCHW(Stride const &stride = Stride(0)): stride_(stride) { } + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static TensorNCHW packed(TensorCoord const &extent) { + return TensorNCHW( + make_Coord( + extent.w(), + extent.w() * extent.h(), + extent.h() * extent.w() * extent.c() + ) + ); + } + + /// Returns the offset of a coordinate in linear memory. + CUTLASS_HOST_DEVICE + LongIndex operator()(TensorCoord const &coord) const { + return coord.w() + + LongIndex(stride_[0] * coord.h()) + + LongIndex(stride_[1] * coord.c()) + + LongIndex(stride_[2] * coord.n()); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { + return stride_; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride & stride() { + return stride_; + } + + /// Compute the number of contiguous elements needed to store a tensor with the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(TensorCoord const &extent) const { + return extent.n() * stride_[2]; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Mapping function for 4-D NC/xHWx tensors. 
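///
/// Reading aid: the channel index is split as c = c_major * Interleave + c_minor, and c_minor
/// becomes the fastest-varying dimension. For the packed case (strides
/// [Interleave*W, Interleave*W*H, H*W*C]; extent names used for illustration):
///
///   offset(n, h, w, c) = (c % Interleave) + Interleave*w + Interleave*W*h
///                        + Interleave*W*H*(c / Interleave) + H*W*C*n
///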
+template +class TensorNCxHWx { +public: + + /// Interleaving quantity + static int const kInterleave = Interleave; + + /// Logical rank of tensor + static int const kRank = 4; + + /// Rank of stride vector + static int const kStrideRank = 3; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = Tensor4DCoord; + + /// Stride vector + using Stride = Coord; + +private: + // + // Data members + // + + /// Stride data member - [Interleave x w, Interleave x wh, hwc] + Stride stride_; + +public: + // + // Methods + // + + /// Constructor + CUTLASS_HOST_DEVICE + TensorNCxHWx(Stride const &stride = Stride(0)): stride_(stride) { } + + /// Constructor + CUTLASS_HOST_DEVICE + TensorNCxHWx( + typename Stride::Index stride_w, ///< number of elements between adjacent W coordinates + typename Stride::Index stride_h, ///< number of elements between adjacent H coordinates + typename Stride::Index stride_n ///< number of elements between adjacent N coordinates + ): + stride_(make_Coord(stride_w, stride_h, stride_n)) { } + + /// Constructor + // Once convolutions implement 64b stride this ctor can be deleted + CUTLASS_HOST_DEVICE + TensorNCxHWx(Coord const &stride): + stride_(make_Coord( + static_cast(stride[0]), + static_cast(stride[1]), + static_cast(stride[2])) + ) { } + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static TensorNCxHWx packed(TensorCoord const &extent) { + return TensorNCxHWx( + make_Coord( + kInterleave * extent.w(), + kInterleave * extent.w() * extent.h(), + extent.h() * extent.w() * extent.c() + ) + ); + } + + /// Returns the offset of a coordinate in linear memory. + CUTLASS_HOST_DEVICE + LongIndex operator()(TensorCoord const &coord) const { + + Index c_minor = (coord.c() % kInterleave); + Index c_major = (coord.c() / kInterleave); + + return c_minor + + LongIndex(kInterleave * coord.w()) + + LongIndex(stride_[0] * coord.h()) + + LongIndex(stride_[1] * c_major) + + LongIndex(stride_[2] * coord.n()); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { + return stride_; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride & stride() { + return stride_; + } + + /// Compute the number of contiguous elements needed to store a tensor with the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(TensorCoord const &extent) const { + return extent.n() * stride_[2]; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Mapping function for 4-D CxRSKx tensors. 
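///
/// Reading aid: here the N dimension, rather than W, is interleaved with the minor channel
/// index. For the packed case (strides [Interleave*N, Interleave*N*W, Interleave*N*W*H]):
///
///   offset(n, h, w, c) = (c % Interleave) + Interleave*n + Interleave*N*w
///                        + Interleave*N*W*h + Interleave*N*W*H*(c / Interleave)
///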
+template +class TensorCxRSKx { +public: + + /// Interleaving quantity + static int const kInterleave = Interleave; + + /// Logical rank of tensor + static int const kRank = 4; + + /// Rank of stride vector + static int const kStrideRank = 3; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = Tensor4DCoord; + + /// Stride vector + using Stride = Coord; + +private: + // + // Data members + // + + /// Stride data member - [Interleave x n, Interleave x nw, Interleave x nwh] + Stride stride_; + +public: + // + // Methods + // + + /// Constructor + CUTLASS_HOST_DEVICE + TensorCxRSKx(Stride const &stride = Stride(0)): stride_(stride) { } + + /// Constructor + CUTLASS_HOST_DEVICE + TensorCxRSKx( + typename Stride::Index stride_w, ///< number of elements between adjacent W coordinates + typename Stride::Index stride_h, ///< number of elements between adjacent H coordinates + typename Stride::Index stride_n ///< number of elements between adjacent N coordinates + ): + stride_(make_Coord(stride_w, stride_h, stride_n)) { } + + /// Constructor + // Once convolutions implement 64b stride this ctor can be deleted + CUTLASS_HOST_DEVICE + TensorCxRSKx(Coord const &stride): + stride_(make_Coord( + static_cast(stride[0]), + static_cast(stride[1]), + static_cast(stride[2])) + ) { } + + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static TensorCxRSKx packed(TensorCoord const &extent) { + return TensorCxRSKx( + make_Coord( + kInterleave * extent.n(), + kInterleave * extent.n() * extent.w(), + kInterleave * extent.n() * extent.w() * extent.h() + ) + ); + } + + /// Returns the offset of a coordinate in linear memory. + CUTLASS_HOST_DEVICE + LongIndex operator()(TensorCoord const &coord) const { + + Index c_minor = (coord.c() % kInterleave); + Index c_major = (coord.c() / kInterleave); + + return c_minor + + LongIndex(kInterleave * coord.n()) + + LongIndex(stride_[0] * coord.w()) + + LongIndex(stride_[1] * coord.h()) + + LongIndex(stride_[2] * c_major); + } + + /// Returns the offset of a pitchlinear coordinate in linear memory. + CUTLASS_HOST_DEVICE + LongIndex operator()(PitchLinearCoord const &coord) const { + return (coord.contiguous() % kInterleave) + + LongIndex((coord.contiguous() / kInterleave) * stride_[2]) + + LongIndex(coord.strided() * kInterleave); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { + return stride_; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride & stride() { + return stride_; + } + + /// Compute the number of contiguous elements needed to store a tensor with the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(TensorCoord const &extent) const { + return (extent.c() / kInterleave * stride_[2]); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Mapping function for 5-D NDHWC tensors. 
+class TensorNDHWC { +public: + /// Logical rank of tensor + static int const kRank = 5; + + /// Rank of stride vector + static int const kStrideRank = 4; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate (n, d, h, w, c) + using TensorCoord = Tensor5DCoord; + + /// Stride vector + using Stride = Coord; + +private: + // + // Data members + // + + /// Stride data member - [c, wc, hwc, dhwc] + Stride stride_; + +public: + // + // Methods + // + + /// Constructor + CUTLASS_HOST_DEVICE + TensorNDHWC(Stride const &stride = Stride(0)): stride_(stride) { } + + /// Constructor + CUTLASS_HOST_DEVICE + TensorNDHWC( + typename Stride::Index c, + typename Stride::Index wc, + typename Stride::Index hwc, + typename Stride::Index dhwc): + stride_(make_Coord(c, wc, hwc, dhwc)) { } + + /// Constructor + // Once convolutions implement 64b stride this ctor can be deleted + CUTLASS_HOST_DEVICE + TensorNDHWC(Coord const &stride): + stride_(make_Coord( + static_cast(stride[0]), + static_cast(stride[1]), + static_cast(stride[2]), + static_cast(stride[3])) + ) { } + + /// Helper returns a layout to a tightly packed NHWC tensor. + CUTLASS_HOST_DEVICE + static TensorNDHWC packed(TensorCoord const &extent) { + return TensorNDHWC( + make_Coord( + extent.c(), + extent.w() * extent.c(), + extent.h() * extent.w() * extent.c(), + extent.d() * extent.h() * extent.w() * extent.c() + ) + ); + } + + /// Returns the offset of a coordinate (n, d, h, w, c) in linear memory. + CUTLASS_HOST_DEVICE + LongIndex operator()(TensorCoord const &coord) const { + return coord.c() + + LongIndex(stride_[0] * coord.w()) + + LongIndex(stride_[1] * coord.h()) + + LongIndex(stride_[2] * coord.d()) + + LongIndex(stride_[3] * coord.n()); + } + + /// Returns the offset of a pitchlinear coordinate in linear memory. 
+ CUTLASS_HOST_DEVICE + LongIndex operator()(PitchLinearCoord coord) const { + return coord.contiguous() + LongIndex(coord.strided() * stride_[3]); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { + return stride_; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride & stride() { + return stride_; + } + + /// Compute the number of contiguous elements needed to store a tensor with the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(TensorCoord const &extent) const { + // it does not make sense if the extent is larger than stride + // and we could not rely on the capacity calculation in such cases + // we could move this checkers to debug code only + if ((extent.c() > stride_[0]) + || (extent.w() * stride_[0] > stride_[1]) + || (extent.h() * stride_[1] > stride_[2]) + || (extent.d() * stride_[2] > stride_[3])) { + assert(0); + } + return extent.n() * stride_[3]; + } +}; + + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace layout +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/layout/tensor_op_multiplicand_sm70.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/layout/tensor_op_multiplicand_sm70.h new file mode 100644 index 0000000000000000000000000000000000000000..b127bffe394ddd02eed66fff5cb952a1101518e6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/layout/tensor_op_multiplicand_sm70.h @@ -0,0 +1,1044 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/coord.h" +#include "cutlass/layout/pitch_linear.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace layout { + +// template < +// int ElementSize, +// gemm::Operand Operand +// > +// struct VoltaTensorOpMultiplicandCongruous; + +// template < +// int ElementSize, +// gemm::Operand Operand +// > +// struct ColumnMajorVoltaTensorOpMultiplicandCongruous; +// template < +// int ElementSize, +// gemm::Operand Operand +// > +// struct RowMajorVoltaTensorOpMultiplicandCongruous; +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Template based on element size (in bits) - defined in terms of pitch-linear memory. +template +struct VoltaTensorOpMultiplicandCongruous { + + /// Logical rank of tensor + static int const kRank = 2; + + /// Rank of stride vector + static int const kStrideRank = 1; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = PitchLinearCoord; + + /// Stride vector + using Stride = Coord; + + // + // Invariants + // + + /// This layout is optimized for 128b accesses + static int const kAccessSize = 128; + + /// Fundamental tile shape in units of vectors + using TileShape = PitchLinearShape<8, 4>; + + /// Fundamental partition shape in units of vectors + using PartitionShape = PitchLinearShape<8, 2>; + + // + // Static constants + // + + static int const kElementSize = ElementSize; + static int const kElementsPerAccess = kAccessSize / kElementSize; + + using PartitionCount = PitchLinearShape< + TileShape::kContiguous / PartitionShape::kContiguous, + TileShape::kStrided / PartitionShape::kStrided + >; + + using AccessCount = PitchLinearShape< + PartitionShape::kContiguous, + PartitionShape::kStrided + >; + +private: + + // + // Data members + // + + /// Stride data member + Stride stride_; + +public: + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + VoltaTensorOpMultiplicandCongruous(Index ldm = 0): stride_(ldm) { } + + /// Ctor + CUTLASS_HOST_DEVICE + VoltaTensorOpMultiplicandCongruous(Stride stride): stride_(stride) { } + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static VoltaTensorOpMultiplicandCongruous packed(TensorCoord const &extent) { + return VoltaTensorOpMultiplicandCongruous(extent[0]); + } + + /// Returns the offset of a coordinate in linear memory. 
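  ///
  /// Reading aid for the computation below (assuming 16-bit elements, so
  /// kElementsPerAccess == 128/16 == 8): the coordinate is first reduced to a 128-bit vector
  /// index, located within an 8x4 tile of vectors (TileShape), and the vector's position
  /// inside that tile is permuted with the shift/XOR pattern noted in the body so that a
  /// warp's shared-memory accesses avoid bank conflicts. The element offset within a vector,
  /// coord.contiguous() % kElementsPerAccess, is carried through unchanged.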
+ /// Assumes coordinate has convention (contiguous, strided) + CUTLASS_HOST_DEVICE + LongIndex operator()(TensorCoord const &coord) const { + + // First, compute c and s of vector within source (in units of vector accesses) + int vec_contiguous_idx = coord.contiguous() / kElementsPerAccess; + int vec_strided_idx = coord.strided(); + + // Compute the fundamental tile being accessed + int tile_contiguous_idx = vec_contiguous_idx / TileShape::kContiguous; + int tile_strided_idx = vec_strided_idx / TileShape::kStrided; + + int tile_contiguous_residual = vec_contiguous_idx % TileShape::kContiguous; + int tile_strided_residual = vec_strided_idx % TileShape::kStrided; + + // Then swizzle in a tile + // Swizzle pattern is (tid[2:0] << 2)|(tid[4:3] ^ tid[2:1]) + int permuted_strided_within_tile = (tile_contiguous_residual >> 1); + int permuted_contiguous_within_tile = (tile_strided_residual ^ permuted_strided_within_tile) | + ((tile_contiguous_residual & 1) << 2); + // Compute final element location + int element_contiguous = (tile_contiguous_idx * TileShape::kContiguous + + permuted_contiguous_within_tile) * kElementsPerAccess + (coord.contiguous() % kElementsPerAccess); + + int element_strided = tile_strided_idx * TileShape::kStrided + permuted_strided_within_tile; + + return element_contiguous + element_strided * stride_[0]; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { + return stride_; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride & stride() { + return stride_; + } + + /// Compute the number of contiguous elements needed to store a tensor with the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(TensorCoord const &extent) const { + return extent[1] * stride_[0]; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Template mapping a column-major view of pitch-linear memory to VoltaTensorOpMultiplicandCongruous +template +struct ColumnMajorVoltaTensorOpMultiplicandCongruous { + + /// Logical rank of tensor + static int const kRank = 2; + + /// Rank of stride vector + static int const kStrideRank = 1; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = MatrixCoord; + + /// Stride vector + using Stride = Coord; + + // + // Invariants + // + + using Base = VoltaTensorOpMultiplicandCongruous; + + /// This layout is optimized for 128b accesses + static int const kAccessSize = Base::kAccessSize; + using TileShape = typename Base::TileShape; + using PartitionShape = typename Base::PartitionShape; + + // + // Static constants + // + + static int const kElementSize = Base::kElementSize; + static int const kElementsPerAccess = Base::kElementsPerAccess; + using PartitionCount = typename Base::PartitionCount; + using AccessCount = typename Base::AccessCount; + +private: + + // + // Data members + // + + Base layout_; + +public: + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + ColumnMajorVoltaTensorOpMultiplicandCongruous(Index ldm = 0): layout_(ldm) { } + + /// Ctor + CUTLASS_HOST_DEVICE + ColumnMajorVoltaTensorOpMultiplicandCongruous(Stride stride): layout_(stride) { } + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static ColumnMajorVoltaTensorOpMultiplicandCongruous packed(TensorCoord const &extent) { + return ColumnMajorVoltaTensorOpMultiplicandCongruous(extent.row()); + } + + /// 
Returns the offset of a coordinate in linear memory. + /// Assumes coordinate has convention (contiguous, strided) + CUTLASS_HOST_DEVICE + LongIndex operator()(TensorCoord const &coord) const { + return layout_(PitchLinearCoord(coord.row(), coord.column())); + } + + /// Inverse of layout function, mapping linear offset to logical coordinate + CUTLASS_HOST_DEVICE + TensorCoord inverse(LongIndex offset) const { + PitchLinearCoord coord = layout_.inverse(offset); + return MatrixCoord(coord.contiguous(), coord.strided()); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { + return layout_.stride(); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride & stride() { + return layout_.stride(); + } + + /// Compute the number of contiguous elements needed to store a tensor with the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(TensorCoord const &extent) const { + return layout_.capacity(PitchLinearCoord(extent.row(), extent.column())); + } +}; + +/// Template mapping a row-major view of pitch-linear memory to VoltaTensorOpMultiplicandCongruous +template +struct RowMajorVoltaTensorOpMultiplicandCongruous { + + /// Logical rank of tensor + static int const kRank = 2; + + /// Rank of stride vector + static int const kStrideRank = 1; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = MatrixCoord; + + /// Stride vector + using Stride = Coord; + + // + // Invariants + // + + using Base = VoltaTensorOpMultiplicandCongruous; + + /// This layout is optimized for 128b accesses + static int const kAccessSize = Base::kAccessSize; + using TileShape = typename Base::TileShape; + using PartitionShape = typename Base::PartitionShape; + + // + // Static constants + // + + static int const kElementSize = Base::kElementSize; + static int const kElementsPerAccess = Base::kElementsPerAccess; + using PartitionCount = typename Base::PartitionCount; + using AccessCount = typename Base::AccessCount; + +private: + + // + // Data members + // + + Base layout_; + +public: + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + RowMajorVoltaTensorOpMultiplicandCongruous(Index ldm = 0): layout_(ldm) { } + + /// Ctor + CUTLASS_HOST_DEVICE + RowMajorVoltaTensorOpMultiplicandCongruous(Stride stride): layout_(stride) { } + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static RowMajorVoltaTensorOpMultiplicandCongruous packed(TensorCoord const &extent) { + return RowMajorVoltaTensorOpMultiplicandCongruous(extent.column()); + } + + /// Returns the offset of a coordinate in linear memory. 
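  ///
  /// For a row-major matrix the contiguous dimension is the column index, so this adaptor
  /// maps MatrixCoord(row, column) to PitchLinearCoord(contiguous = column, strided = row)
  /// before delegating to the underlying pitch-linear layout; the column-major adaptor above
  /// swaps the two roles.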
+ /// Assumes coordinate has convention (contiguous, strided) + CUTLASS_HOST_DEVICE + LongIndex operator()(TensorCoord const &coord) const { + return layout_(PitchLinearCoord(coord.column(), coord.row())); + } + + /// Inverse of layout function, mapping linear offset to logical coordinate + CUTLASS_HOST_DEVICE + TensorCoord inverse(LongIndex offset) const { + PitchLinearCoord coord = layout_.inverse(offset); + return MatrixCoord(coord.strided(), coord.contiguous()); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { + return layout_.stride(); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride & stride() { + return layout_.stride(); + } + + /// Compute the number of contiguous elements needed to store a tensor with the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(TensorCoord const &extent) const { + return layout_.capacity(PitchLinearCoord(extent.column(), extent.row())); + } +}; + + +/// Template based on element size (in bits) - defined in terms of pitch-linear memory. +// template +template +struct VoltaTensorOpMultiplicandBCongruous { + /// Logical rank of tensor + static int const kRank = 2; + + /// Rank of stride vector + static int const kStrideRank = 1; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = PitchLinearCoord; + + /// Stride vector + using Stride = Coord; + + // + // Invariants + // + + /// This layout is optimized for 128b accesses + static int const kAccessSize = 128; + + /// Fundamental tile shape in units of vectors + using TileShape = PitchLinearShape<8, 4>; + + /// Fundamental partition shape in units of vectors + using PartitionShape = PitchLinearShape<4, 4>; + + // + // Static constants + // + + static int const kElementSize = ElementSize; + static int const kElementsPerAccess = kAccessSize / kElementSize; + + using PartitionCount = PitchLinearShape< + TileShape::kContiguous / PartitionShape::kContiguous, + TileShape::kStrided / PartitionShape::kStrided + >; + + using AccessCount = PitchLinearShape< + PartitionShape::kContiguous, + PartitionShape::kStrided + >; + +private: + + // + // Data members + // + + /// Stride data member + Stride stride_; + +public: + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + VoltaTensorOpMultiplicandBCongruous(Index ldm = 0): stride_(ldm) { } + + /// Ctor + CUTLASS_HOST_DEVICE + VoltaTensorOpMultiplicandBCongruous(Stride stride): stride_(stride) { } + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static VoltaTensorOpMultiplicandBCongruous packed(TensorCoord const &extent) { + return VoltaTensorOpMultiplicandBCongruous(extent[0]); + } + + /// Returns the offset of a coordinate in linear memory. 
+ /// Assumes coordinate has convention (contiguous, strided) + CUTLASS_HOST_DEVICE + LongIndex operator()(TensorCoord const &coord) const { + + // First, compute c and s of vector within source (in units of vector accesses) + int vec_contiguous_idx = coord.contiguous() / kElementsPerAccess; + int vec_strided_idx = coord.strided(); + + // Compute the fundamental tile being accessed + int tile_contiguous_idx = vec_contiguous_idx / TileShape::kContiguous; + int tile_strided_idx = vec_strided_idx / TileShape::kStrided; + + int tile_contiguous_residual = vec_contiguous_idx % TileShape::kContiguous; + int tile_strided_residual = vec_strided_idx % TileShape::kStrided; + + // Then swizzle in a tile + // Swizzle pattern is (tid[1:0] << 3)|(tid & 0x4)|(tid[1:0]) + int permuted_strided_within_tile = (tile_contiguous_residual & 0x3); + int permuted_contiguous_within_tile = (tile_strided_residual ^ permuted_strided_within_tile) | + (tile_contiguous_residual & 0x4); + + // Compute final element location + int element_contiguous = (tile_contiguous_idx * TileShape::kContiguous + + permuted_contiguous_within_tile) * kElementsPerAccess + (coord.contiguous() % kElementsPerAccess); + + int element_strided = tile_strided_idx * TileShape::kStrided + permuted_strided_within_tile; + + return element_contiguous + element_strided * stride_[0]; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { + return stride_; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride & stride() { + return stride_; + } + + /// Compute the number of contiguous elements needed to store a tensor with the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(TensorCoord const &extent) const { + return extent[1] * stride_[0]; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Template mapping a column-major view of pitch-linear memory to VoltaTensorOpMultiplicandCongruous +template +struct ColumnMajorVoltaTensorOpMultiplicandBCongruous { + + /// Logical rank of tensor + static int const kRank = 2; + + /// Rank of stride vector + static int const kStrideRank = 1; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = MatrixCoord; + + /// Stride vector + using Stride = Coord; + + // + // Invariants + // + + using Base = VoltaTensorOpMultiplicandBCongruous; + + /// This layout is optimized for 128b accesses + static int const kAccessSize = Base::kAccessSize; + using TileShape = typename Base::TileShape; + using PartitionShape = typename Base::PartitionShape; + + // + // Static constants + // + + static int const kElementSize = Base::kElementSize; + static int const kElementsPerAccess = Base::kElementsPerAccess; + using PartitionCount = typename Base::PartitionCount; + using AccessCount = typename Base::AccessCount; + +private: + + // + // Data members + // + + Base layout_; + +public: + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + ColumnMajorVoltaTensorOpMultiplicandBCongruous(Index ldm = 0): layout_(ldm) { } + + /// Ctor + CUTLASS_HOST_DEVICE + ColumnMajorVoltaTensorOpMultiplicandBCongruous(Stride stride): layout_(stride) { } + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static ColumnMajorVoltaTensorOpMultiplicandBCongruous packed(TensorCoord const &extent) { + return ColumnMajorVoltaTensorOpMultiplicandBCongruous(extent.row()); + } + + 
/// Returns the offset of a coordinate in linear memory. + /// Assumes coordinate has convention (contiguous, strided) + CUTLASS_HOST_DEVICE + LongIndex operator()(TensorCoord const &coord) const { + return layout_(PitchLinearCoord(coord.row(), coord.column())); + } + + /// Inverse of layout function, mapping linear offset to logical coordinate + CUTLASS_HOST_DEVICE + TensorCoord inverse(LongIndex offset) const { + PitchLinearCoord coord = layout_.inverse(offset); + return MatrixCoord(coord.contiguous(), coord.strided()); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { + return layout_.stride(); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride & stride() { + return layout_.stride(); + } + + /// Compute the number of contiguous elements needed to store a tensor with the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(TensorCoord const &extent) const { + return layout_.capacity(PitchLinearCoord(extent.row(), extent.column())); + } +}; + +/// Template mapping a row-major view of pitch-linear memory to VoltaTensorOpMultiplicandCongruous +template +struct RowMajorVoltaTensorOpMultiplicandBCongruous { + + /// Logical rank of tensor + static int const kRank = 2; + + /// Rank of stride vector + static int const kStrideRank = 1; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = MatrixCoord; + + /// Stride vector + using Stride = Coord; + + // + // Invariants + // + + using Base = VoltaTensorOpMultiplicandBCongruous; + + /// This layout is optimized for 128b accesses + static int const kAccessSize = Base::kAccessSize; + using TileShape = typename Base::TileShape; + using PartitionShape = typename Base::PartitionShape; + + // + // Static constants + // + + static int const kElementSize = Base::kElementSize; + static int const kElementsPerAccess = Base::kElementsPerAccess; + using PartitionCount = typename Base::PartitionCount; + using AccessCount = typename Base::AccessCount; + +private: + + // + // Data members + // + + Base layout_; + +public: + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + RowMajorVoltaTensorOpMultiplicandBCongruous(Index ldm = 0): layout_(ldm) { } + + /// Ctor + CUTLASS_HOST_DEVICE + RowMajorVoltaTensorOpMultiplicandBCongruous(Stride stride): layout_(stride) { } + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static RowMajorVoltaTensorOpMultiplicandBCongruous packed(TensorCoord const &extent) { + return RowMajorVoltaTensorOpMultiplicandBCongruous(extent.column()); + } + + /// Returns the offset of a coordinate in linear memory. 
+ /// Assumes coordinate has convention (contiguous, strided) + CUTLASS_HOST_DEVICE + LongIndex operator()(TensorCoord const &coord) const { + return layout_(PitchLinearCoord(coord.column(), coord.row())); + } + + /// Inverse of layout function, mapping linear offset to logical coordinate + CUTLASS_HOST_DEVICE + TensorCoord inverse(LongIndex offset) const { + PitchLinearCoord coord = layout_.inverse(offset); + return MatrixCoord(coord.strided(), coord.contiguous()); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { + return layout_.stride(); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride & stride() { + return layout_.stride(); + } + + /// Compute the number of contiguous elements needed to store a tensor with the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(TensorCoord const &extent) const { + return layout_.capacity(PitchLinearCoord(extent.column(), extent.row())); + } +}; + +/// Template based on element size (in bits) - defined in terms of pitch-linear +/// memory and KBlock size (in elements). +template +struct VoltaTensorOpMultiplicandCrosswise { + /// Logical rank of tensor + static int const kRank = 2; + + /// Rank of stride vector + static int const kStrideRank = 1; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = PitchLinearCoord; + + /// Stride vector + using Stride = Coord; + + // + // Invariants + // + + /// This layout is optimized for 64b accesses + static int const kAccessSize = 64; + + // + // Static constants + // + + static int const kElementSize = ElementSize; + static int const kElementsPerAccess = kAccessSize / kElementSize; + static int const kKBlock = KBlock; + + private: + // + // Data members + // + + /// Stride data member. For GEMM, it equals to KBlock x stage. + Stride stride_; + public: + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + VoltaTensorOpMultiplicandCrosswise(Index ldm = 0) : stride_(ldm) {} + + /// Ctor + CUTLASS_HOST_DEVICE + VoltaTensorOpMultiplicandCrosswise(Stride stride) : stride_(stride) {} + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static VoltaTensorOpMultiplicandCrosswise packed(TensorCoord const &extent) { + return VoltaTensorOpMultiplicandCrosswise(extent[1]); + } + + /// Returns the offset of a coordinate in linear memory. 
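  ///
  /// Reading aid: in this crosswise layout the two source dimensions effectively trade
  /// places. The contiguous vector index of the source becomes the strided position of the
  /// swizzled element (permuted_vec_strided in the body below), while the contiguous position
  /// is derived from the strided index, further XOR-ed with bits of the contiguous index, to
  /// spread a warp's accesses across shared-memory banks.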
+ /// Assumes coordinate has convention (contiguous, strided) + CUTLASS_HOST_DEVICE + LongIndex operator()(TensorCoord const &coord) const { + + // + // First, compute c and s of vector within source (in units of vector + // accesses) + // + int vec_contiguous_idx = coord.contiguous() / kElementsPerAccess; + int vec_strided_idx = coord.strided(); + + // + // Then swizzle + // The mapping is like this: + // id[1:0]|(id[3]^id[4])|id[2] + + int vec_strided_within_tile = vec_contiguous_idx & 0x7; + int permuted_vec_contiguous = + (vec_strided_idx & (~0xF)) + (vec_strided_idx & 0x3) * 4 + + (((vec_strided_idx >> 2) ^ ((vec_strided_idx & 0x10) >> 3)) & 0x3); + + permuted_vec_contiguous ^= ((vec_strided_within_tile >> 1) & 0x3); + + int permuted_vec_strided = vec_contiguous_idx; + + // + // Compute final element location + // + + int element_contiguous = permuted_vec_contiguous * kElementsPerAccess + + (coord.contiguous() % kElementsPerAccess); + + return element_contiguous + permuted_vec_strided * (stride_[0] * kElementsPerAccess); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { return stride_; } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride &stride() { return stride_; } + + /// Compute the number of contiguous elements needed to store a tensor with + /// the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(TensorCoord const &extent) const { + return extent[0] * stride_[0]; + } +}; + +/// Template mapping a column-major view of pitch-linear memory to +/// VoltaTensorOpMultiplicandCrosswise +template +struct ColumnMajorVoltaTensorOpMultiplicandCrosswise { + /// Logical rank of tensor + static int const kRank = 2; + + /// Rank of stride vector + static int const kStrideRank = 1; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = MatrixCoord; + + /// Stride vector + using Stride = Coord; + + // + // Invariants + // + + using Base = VoltaTensorOpMultiplicandCrosswise; + + /// This layout is optimized for 64b accesses + static int const kAccessSize = Base::kAccessSize; + + // + // Static constants + // + + static int const kElementSize = Base::kElementSize; + static int const kElementsPerAccess = Base::kElementsPerAccess; + + private: + // + // Data members + // + + Base layout_; + + public: + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + ColumnMajorVoltaTensorOpMultiplicandCrosswise(Index ldm = 0) : layout_(ldm) {} + + /// Ctor + CUTLASS_HOST_DEVICE + ColumnMajorVoltaTensorOpMultiplicandCrosswise(Stride stride) : layout_(stride) {} + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static ColumnMajorVoltaTensorOpMultiplicandCrosswise packed( + TensorCoord const &extent) { + return ColumnMajorVoltaTensorOpMultiplicandCrosswise(extent.column()); + } + + /// Returns the offset of a coordinate in linear memory. 
+ /// Assumes coordinate has convention (contiguous, strided) + CUTLASS_HOST_DEVICE + LongIndex operator()(TensorCoord const &coord) const { + return layout_(PitchLinearCoord(coord.row(), coord.column())); + } + + /// Inverse of layout function, mapping linear offset to logical coordinate + CUTLASS_HOST_DEVICE + TensorCoord inverse(LongIndex offset) const { + PitchLinearCoord coord = layout_.inverse(offset); + return MatrixCoord(coord.contiguous(), coord.strided()); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { return layout_.stride(); } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride &stride() { return layout_.stride(); } + + /// Compute the number of contiguous elements needed to store a tensor with + /// the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(TensorCoord const &extent) const { + return layout_.capacity(PitchLinearCoord(extent.row(), extent.column())); + } +}; + +/// Template mapping a row-major view of pitch-linear memory to +/// TensorOpMultiplicandCrosswise +template +struct RowMajorVoltaTensorOpMultiplicandCrosswise { + /// Logical rank of tensor + static int const kRank = 2; + + /// Rank of stride vector + static int const kStrideRank = 1; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = MatrixCoord; + + /// Stride vector + using Stride = Coord; + + // + // Invariants + // + + using Base = VoltaTensorOpMultiplicandCrosswise; + + /// This layout is optimized for 64b accesses + static int const kAccessSize = Base::kAccessSize; + + // + // Static constants + // + + static int const kElementSize = Base::kElementSize; + static int const kElementsPerAccess = Base::kElementsPerAccess; + + private: + // + // Data members + // + + Base layout_; + + public: + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + RowMajorVoltaTensorOpMultiplicandCrosswise(Index ldm = 0) : layout_(ldm) {} + + /// Ctor + CUTLASS_HOST_DEVICE + RowMajorVoltaTensorOpMultiplicandCrosswise(Stride stride) : layout_(stride) {} + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static RowMajorVoltaTensorOpMultiplicandCrosswise packed( + TensorCoord const &extent) { + return RowMajorVoltaTensorOpMultiplicandCrosswise(extent.row()); + } + + /// Returns the offset of a coordinate in linear memory. 
+ /// Assumes coordinate has convention (contiguous, strided) + CUTLASS_HOST_DEVICE + LongIndex operator()(TensorCoord const &coord) const { + return layout_(PitchLinearCoord(coord.column(), coord.row())); + } + + /// Inverse of layout function, mapping linear offset to logical coordinate + CUTLASS_HOST_DEVICE + TensorCoord inverse(LongIndex offset) const { + PitchLinearCoord coord = layout_.inverse(offset); + return MatrixCoord(coord.strided(), coord.contiguous()); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { return layout_.stride(); } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride &stride() { return layout_.stride(); } + + /// Compute the number of contiguous elements needed to store a tensor with + /// the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(TensorCoord const &extent) const { + return layout_.capacity(PitchLinearCoord(extent.column(), extent.row())); + } +}; + +} // namespace layout +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/layout/tensor_op_multiplicand_sm75.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/layout/tensor_op_multiplicand_sm75.h new file mode 100644 index 0000000000000000000000000000000000000000..14148b7c7a74e2c4790627417349ad6c2b0239b4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/layout/tensor_op_multiplicand_sm75.h @@ -0,0 +1,1161 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/coord.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/layout/pitch_linear.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace layout { + +//////////////////////////////////////////////////////////////////////////////// + +/// Template based on element size (in bits) - defined in terms of pitch-linear +/// memory and Crosswise size (in elements). +/// This one is the base class of all Ampere/Turing fp16/bf16/int8/int4/int1 +/// tensor core kernels. tf32 TN uses this too. +template +struct TensorOpMultiplicand { + /// Logical rank of tensor + static int const kRank = 2; + + /// Rank of stride vector + static int const kStrideRank = 1; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = PitchLinearCoord; + + /// Stride vector + using Stride = Coord; + + // + // Static constants + // + + /// This layout is optimized for 128b accesses + static int const kAccessSize = 128; + + static int const kElementSize = ElementSize; + static int const kElementsPerAccess = kAccessSize / kElementSize; + static int const kCrosswise = Crosswise; + + /// Contiguous dimension of the tile shape matches one shared memory cache + /// line - 128B. For 128bit access size, it equals to 8 accesses. + static int const kTileShapeContiguous = 128 / (kAccessSize / 8); + + /// Number of kblocks to store PartitionShape::kContiguous Elements + static int const kFactor = + kTileShapeContiguous * kElementsPerAccess / kCrosswise; + + static_assert( + (kFactor > 0), + "kCrosswise should be no large than one shared memory cache line."); + + /// The strided dimension needs to be at least (WarpSize(32) / + /// kTileShapeContiguous) for a warp to access. To ensure conflict free + /// access, it also needs to be at least (kTileShapeContiguous / kFactor). + /// See comments below + static int const kTileShapeStride = + ((kTileShapeContiguous / kFactor) > (32 / kTileShapeContiguous)) + ? (kTileShapeContiguous / kFactor) + : (32 / kTileShapeContiguous); + + /// Fundamental tile shape in units of vectors to guarantee bank conflict free + /// shared memory load/store. + /// For kFactor = 1, TileShape = <8, 8> + /// For kFactor > 1, TileShape = <8, 4> + using TileShape = PitchLinearShape; + + /// Fundamental partition shape in units of vectors + using PartitionShape = PitchLinearShape<4, 4>; + + using PartitionCount = + PitchLinearShape; + + using AccessCount = + PitchLinearShape; + + private: + // + // Data members + // + + /// Stride data member. For GEMM, it equals to kCrosswise x stage. + Stride stride_; + + public: + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + TensorOpMultiplicand(Index ldm = 0) : stride_(ldm) {} + + /// Ctor + CUTLASS_HOST_DEVICE + TensorOpMultiplicand(Stride stride) : stride_(stride) {} + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static TensorOpMultiplicand packed(TensorCoord const &extent) { + return TensorOpMultiplicand(extent[0]); + } + + /// Returns the offset of a coordinate in linear memory. 
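  ///
  /// Reading aid for the computation below. The static constants above fix the tile geometry;
  /// for example, with 16-bit elements and Crosswise == 64 (values chosen for illustration):
  /// kElementsPerAccess = 128/16 = 8, kTileShapeContiguous = 128/(128/8) = 8,
  /// kFactor = 8*8/64 = 1 and TileShape = <8, 8>. With Crosswise == 32 instead, kFactor = 2
  /// and TileShape = <8, 4>, i.e. two kblocks are packed side by side along the contiguous
  /// dimension (the (coord.strided() % kFactor) term below). The vector position inside a
  /// tile is then XOR-swizzled at partition granularity before the final element offset is
  /// formed.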
+ /// Assumes coordinate has convention (contiguous, strided) + CUTLASS_HOST_DEVICE + LongIndex operator()(TensorCoord const &coord) const { + // + // First, compute c and s of vector within source (in units of vector + // accesses) + // + + int vec_contiguous_idx = coord.contiguous() / kElementsPerAccess; + int vec_strided_idx = coord.strided() / kFactor; + + // Compute the fundamental tile being accessed + int tile_contiguous_idx = + vec_contiguous_idx / (TileShape::kContiguous / kFactor); + + int tile_contiguous_residual = + vec_contiguous_idx % (TileShape::kContiguous / kFactor) + + ((coord.strided() % kFactor) * (TileShape::kContiguous / kFactor)); + int tile_strided_residual = vec_strided_idx % TileShape::kStrided; + + // Compute the 'partition' within the fundamental tile + int partition_contiguous_idx = + tile_contiguous_residual / PartitionShape::kContiguous; + int partition_strided_idx = + tile_strided_residual / PartitionShape::kStrided; + + int partition_contiguous_residual = + tile_contiguous_residual % PartitionShape::kContiguous; + int partition_strided_residual = + tile_strided_residual % PartitionShape::kStrided; + + // + // Then swizzle + // + + int permuted_vec_contiguous_within_partition = + partition_contiguous_residual ^ (partition_strided_residual % 4); + + int permuted_partition_contiguous_within_tile = + partition_contiguous_idx ^ (partition_strided_idx % 2); + + // + // Compute final element location + // + + int element_contiguous = (tile_contiguous_idx * TileShape::kContiguous + + permuted_partition_contiguous_within_tile * + PartitionShape::kContiguous + + permuted_vec_contiguous_within_partition) * + kElementsPerAccess + + (coord.contiguous() % kElementsPerAccess); + + int element_strided = vec_strided_idx; + + return element_contiguous + element_strided * stride_[0] * kFactor; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { return stride_; } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride &stride() { return stride_; } + + /// Compute the number of contiguous elements needed to store a tensor with + /// the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(TensorCoord const &extent) const { + return extent[1] * stride_[0]; + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Template based on element size (in bits) - defined in terms of pitch-linear +/// memory and Crosswise size (in elements). 
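Editorial aside (not part of the vendored header): the offset computation of TensorOpMultiplicand above can be exercised on the host. Below is a minimal standalone sketch that hard-codes one assumed configuration, ElementSize = 16 and Crosswise = 64, for which the header's formulas give kElementsPerAccess = 8, kTileShapeContiguous = 8, kFactor = 1, TileShape = <8, 8> and PartitionShape = <4, 4>. The helper name and the parameter choice are illustrative only.

// Host-side transcription of the swizzled offset mapping for the assumed
// 16-bit / Crosswise=64 configuration.  `ldm` plays the role of stride_[0]
// (for GEMM, kCrosswise x stage, per the comment in the header).
#include <cstdint>

int64_t tensor_op_multiplicand_offset_16b_x64(int c, int s, int64_t ldm) {
  constexpr int kElementsPerAccess = 8;
  constexpr int kFactor = 1;
  constexpr int kTileContiguous = 8;   // TileShape::kContiguous
  constexpr int kTileStrided = 8;      // TileShape::kStrided
  constexpr int kPartContiguous = 4;   // PartitionShape::kContiguous
  constexpr int kPartStrided = 4;      // PartitionShape::kStrided

  // Vector index of the access within the source tensor
  int vec_contiguous_idx = c / kElementsPerAccess;
  int vec_strided_idx = s / kFactor;

  // Locate the fundamental tile and the residual position inside it
  int tile_contiguous_idx = vec_contiguous_idx / (kTileContiguous / kFactor);
  int tile_contiguous_residual =
      vec_contiguous_idx % (kTileContiguous / kFactor) +
      (s % kFactor) * (kTileContiguous / kFactor);
  int tile_strided_residual = vec_strided_idx % kTileStrided;

  // Locate the partition within the tile
  int partition_contiguous_idx = tile_contiguous_residual / kPartContiguous;
  int partition_strided_idx = tile_strided_residual / kPartStrided;
  int partition_contiguous_residual = tile_contiguous_residual % kPartContiguous;
  int partition_strided_residual = tile_strided_residual % kPartStrided;

  // XOR swizzle within the partition and within the tile
  int permuted_vec = partition_contiguous_residual ^ (partition_strided_residual % 4);
  int permuted_part = partition_contiguous_idx ^ (partition_strided_idx % 2);

  int element_contiguous =
      (tile_contiguous_idx * kTileContiguous + permuted_part * kPartContiguous +
       permuted_vec) * kElementsPerAccess + (c % kElementsPerAccess);

  return element_contiguous + int64_t(vec_strided_idx) * ldm * kFactor;
}

Sweeping a warp's coordinates through this helper and bucketing the returned offsets by (offset / kElementsPerAccess) % 8 is a quick way to sanity-check the conflict-free property the comments above describe.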
+template +struct TensorOpMultiplicandCongruous { + /// Logical rank of tensor + static int const kRank = 2; + + /// Rank of stride vector + static int const kStrideRank = 1; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = PitchLinearCoord; + + /// Stride vector + using Stride = Coord; + + // + // Invariants + // + + using Base = TensorOpMultiplicand; + + /// This layout is optimized for 128b accesses + static int const kAccessSize = Base::kAccessSize; + using TileShape = typename Base::TileShape; + using PartitionShape = typename Base::PartitionShape; + + // + // Static constants + // + + static int const kElementSize = Base::kElementSize; + static int const kElementsPerAccess = Base::kElementsPerAccess; + using PartitionCount = typename Base::PartitionCount; + using AccessCount = typename Base::AccessCount; + + private: + // + // Data members + // + + Base layout_; + + public: + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + TensorOpMultiplicandCongruous(Index ldm = 0) : layout_(ldm) {} + + /// Ctor + CUTLASS_HOST_DEVICE + TensorOpMultiplicandCongruous(Stride stride) : layout_(stride) {} + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static TensorOpMultiplicandCongruous packed(TensorCoord const &extent) { + return TensorOpMultiplicandCongruous(extent[0]); + } + + /// Returns the offset of a coordinate in linear memory. + /// Assumes coordinate has convention (contiguous, strided) + CUTLASS_HOST_DEVICE + LongIndex operator()(TensorCoord const &coord) const { + return layout_(coord); + } + + /// Inverse of layout function, mapping linear offset to logical coordinate + CUTLASS_HOST_DEVICE + TensorCoord inverse(LongIndex offset) const { + PitchLinearCoord coord = layout_.inverse(offset); + return coord; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { return layout_.stride(); } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride &stride() { return layout_.stride(); } + + /// Compute the number of contiguous elements needed to store a tensor with + /// the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(TensorCoord const &extent) const { + return layout_.capacity(extent); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Template based on element size (in bits) - defined in terms of pitch-linear +/// memory and Crosswise size (in elements). +/// This one is just for TF32 NT kernel. 
+template +struct TensorOpMultiplicandCongruous<32, Crosswise> { + /// Logical rank of tensor + static int const kRank = 2; + + /// Rank of stride vector + static int const kStrideRank = 1; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = PitchLinearCoord; + + /// Stride vector + using Stride = Coord; + + // + // Invariants + // + + /// This layout is optimized for 128b accesses + static int const kAccessSize = 128; + + /// Fundamental tile shape in units of vectors + using TileShape = PitchLinearShape<8, 4>; + + /// Partitionshape is the same as TileShape for this layout + using PartitionShape = PitchLinearShape<8, 4>; + + using PartitionCount = + PitchLinearShape; + + using AccessCount = + PitchLinearShape; + + // + // Static constants + // + static int const kElementSize = 32; + static int const kElementsPerAccess = kAccessSize / kElementSize; + + private: + // + // Data members + // + + /// Stride data member. + Stride stride_; + + public: + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + TensorOpMultiplicandCongruous(Index ldm = 0) : stride_(ldm) {} + + /// Ctor + CUTLASS_HOST_DEVICE + TensorOpMultiplicandCongruous(Stride stride) : stride_(stride) {} + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static TensorOpMultiplicandCongruous packed(TensorCoord const &extent) { + return TensorOpMultiplicandCongruous(extent[0]); + } + + /// Returns the offset of a coordinate in linear memory. + /// Assumes coordinate has convention (contiguous, strided) + CUTLASS_HOST_DEVICE + LongIndex operator()(TensorCoord const &coord) const { + int tc = coord.contiguous() / 32; + int ts = coord.strided() / 4; + + int c = (coord.contiguous() % 32) / kElementsPerAccess; + int s = coord.strided() % 4; + + LongIndex offset = (c ^ (2 * s)) * kElementsPerAccess + s * stride_[0] + + tc * 32 + ts * stride_[0] * 4 + coord.contiguous() % 4; + + return offset; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { return stride_; } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride &stride() { return stride_; } + + /// Compute the number of contiguous elements needed to store a tensor with + /// the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(TensorCoord const &extent) const { + return extent[1] * stride_[0]; + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Template mapping a column-major view of pitch-linear memory to +/// TensorOpMultiplicand +template +struct ColumnMajorTensorOpMultiplicandCongruous { + + /// Logical rank of tensor + static int const kRank = 2; + + /// Rank of stride vector + static int const kStrideRank = 1; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = MatrixCoord; + + /// Stride vector + using Stride = Coord; + + // + // Invariants + // + + using Base = TensorOpMultiplicandCongruous; + + /// This layout is optimized for 128b accesses + static int const kAccessSize = Base::kAccessSize; + using TileShape = typename Base::TileShape; + using PartitionShape = typename Base::PartitionShape; + + // + // Static constants + // + + static int const kElementSize = Base::kElementSize; + static int const kElementsPerAccess = Base::kElementsPerAccess; + using PartitionCount = 
typename Base::PartitionCount; + using AccessCount = typename Base::AccessCount; + +private: + + // + // Data members + // + + Base layout_; + +public: + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + ColumnMajorTensorOpMultiplicandCongruous(Index ldm = 0): layout_(ldm) { } + + /// Ctor + CUTLASS_HOST_DEVICE + ColumnMajorTensorOpMultiplicandCongruous(Stride stride): layout_(stride) { } + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static ColumnMajorTensorOpMultiplicandCongruous packed(TensorCoord const &extent) { + return ColumnMajorTensorOpMultiplicandCongruous(extent.row()); + } + + /// Returns the offset of a coordinate in linear memory. + /// Assumes coordinate has convention (contiguous, strided) + CUTLASS_HOST_DEVICE + LongIndex operator()(TensorCoord const &coord) const { + return layout_(PitchLinearCoord(coord.row(), coord.column())); + } + + /// Inverse of layout function, mapping linear offset to logical coordinate + CUTLASS_HOST_DEVICE + TensorCoord inverse(LongIndex offset) const { + PitchLinearCoord coord = layout_.inverse(offset); + return MatrixCoord(coord.contiguous(), coord.strided()); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { + return layout_.stride(); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride & stride() { + return layout_.stride(); + } + + /// Compute the number of contiguous elements needed to store a tensor with the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(TensorCoord const &extent) const { + return layout_.capacity(PitchLinearCoord(extent.row(), extent.column())); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Template mapping a row-major view of pitch-linear memory to +/// TensorOpMultiplicand +template +struct RowMajorTensorOpMultiplicandCongruous { + + /// Logical rank of tensor + static int const kRank = 2; + + /// Rank of stride vector + static int const kStrideRank = 1; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = MatrixCoord; + + /// Stride vector + using Stride = Coord; + + // + // Invariants + // + + using Base = TensorOpMultiplicandCongruous; + + /// This layout is optimized for 128b accesses + static int const kAccessSize = Base::kAccessSize; + using TileShape = typename Base::TileShape; + using PartitionShape = typename Base::PartitionShape; + + // + // Static constants + // + + static int const kElementSize = Base::kElementSize; + static int const kElementsPerAccess = Base::kElementsPerAccess; + using PartitionCount = typename Base::PartitionCount; + using AccessCount = typename Base::AccessCount; + +private: + + // + // Data members + // + + Base layout_; + +public: + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + RowMajorTensorOpMultiplicandCongruous(Index ldm = 0): layout_(ldm) { } + + /// Ctor + CUTLASS_HOST_DEVICE + RowMajorTensorOpMultiplicandCongruous(Stride stride): layout_(stride) { } + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static RowMajorTensorOpMultiplicandCongruous packed(TensorCoord const &extent) { + return RowMajorTensorOpMultiplicandCongruous(extent.column()); + } + + /// Returns the offset of a coordinate in linear memory. 
+ /// Assumes coordinate has convention (contiguous, strided) + CUTLASS_HOST_DEVICE + LongIndex operator()(TensorCoord const &coord) const { + return layout_(PitchLinearCoord(coord.column(), coord.row())); + } + + /// Inverse of layout function, mapping linear offset to logical coordinate + CUTLASS_HOST_DEVICE + TensorCoord inverse(LongIndex offset) const { + PitchLinearCoord coord = layout_.inverse(offset); + return MatrixCoord(coord.strided(), coord.contiguous()); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { + return layout_.stride(); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride & stride() { + return layout_.stride(); + } + + /// Compute the number of contiguous elements needed to store a tensor with the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(TensorCoord const &extent) const { + return layout_.capacity(PitchLinearCoord(extent.column(), extent.row())); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Template based on element size (in bits) - defined in terms of pitch-linear +/// memory and Crosswise size (in elements). +template +struct TensorOpMultiplicandCrosswise { + /// Logical rank of tensor + static int const kRank = 2; + + /// Rank of stride vector + static int const kStrideRank = 1; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = PitchLinearCoord; + + /// Stride vector + using Stride = Coord; + + // + // Invariants + // + + using Base = TensorOpMultiplicand; + + /// This layout is optimized for 128b accesses + static int const kAccessSize = Base::kAccessSize; + using TileShape = typename Base::TileShape; + using PartitionShape = typename Base::PartitionShape; + + // + // Static constants + // + + static int const kElementSize = Base::kElementSize; + static int const kElementsPerAccess = Base::kElementsPerAccess; + static int const kCrosswise = Base::kCrosswise; + static int const kFactor = Base::kFactor; + using PartitionCount = typename Base::PartitionCount; + using AccessCount = typename Base::AccessCount; + + private: + // + // Data members + // + + Base layout_; + + public: + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + TensorOpMultiplicandCrosswise(Index ldm = 0) : layout_(ldm) {} + + /// Ctor + CUTLASS_HOST_DEVICE + TensorOpMultiplicandCrosswise(Stride stride) : layout_(stride) {} + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static TensorOpMultiplicandCrosswise packed(TensorCoord const &extent) { + return TensorOpMultiplicandCrosswise(extent[0]); + } + + /// Returns the offset of a coordinate in linear memory. 
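Editorial aside (not part of the vendored header): the ColumnMajor and RowMajor adaptors in this file differ only in how they repack a MatrixCoord into the underlying PitchLinearCoord before delegating to the pitch-linear layout. A minimal sketch of that convention, with hypothetical names standing in for the adaptors:

// Column-major views treat the row index as the contiguous dimension;
// row-major views treat the column index as the contiguous dimension.
#include <utility>

// returns {contiguous, strided}
std::pair<int, int> column_major_to_pitch_linear(int row, int column) {
  return {row, column};
}

std::pair<int, int> row_major_to_pitch_linear(int row, int column) {
  return {column, row};
}

Keeping the swizzle itself in the pitch-linear base layout means every major-order view reuses the same bank-conflict reasoning; only this repacking (and the corresponding inverse) changes.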
+ /// Assumes coordinate has convention (contiguous, strided) + CUTLASS_HOST_DEVICE + LongIndex operator()(TensorCoord const &coord) const { + return layout_(coord); + } + + /// Inverse of layout function, mapping linear offset to logical coordinate + CUTLASS_HOST_DEVICE + TensorCoord inverse(LongIndex offset) const { + PitchLinearCoord coord = layout_.inverse(offset); + return coord; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { return layout_.stride(); } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride &stride() { return layout_.stride(); } + + /// Compute the number of contiguous elements needed to store a tensor with + /// the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(TensorCoord const &extent) const { + return layout_.capacity(extent); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Template mapping a column-major view of pitch-linear memory to +/// TensorOpMultiplicandCrosswise +template +struct ColumnMajorTensorOpMultiplicandCrosswise { + /// Logical rank of tensor + static int const kRank = 2; + + /// Rank of stride vector + static int const kStrideRank = 1; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = MatrixCoord; + + /// Stride vector + using Stride = Coord; + + // + // Invariants + // + + using Base = TensorOpMultiplicandCrosswise; + + /// This layout is optimized for 128b accesses + static int const kAccessSize = Base::kAccessSize; + using TileShape = typename Base::TileShape; + using PartitionShape = typename Base::PartitionShape; + + // + // Static constants + // + + static int const kElementSize = Base::kElementSize; + static int const kElementsPerAccess = Base::kElementsPerAccess; + using PartitionCount = typename Base::PartitionCount; + using AccessCount = typename Base::AccessCount; + + private: + // + // Data members + // + + Base layout_; + + public: + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + ColumnMajorTensorOpMultiplicandCrosswise(Index ldm = 0) : layout_(ldm) {} + + /// Ctor + CUTLASS_HOST_DEVICE + ColumnMajorTensorOpMultiplicandCrosswise(Stride stride) : layout_(stride) {} + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static ColumnMajorTensorOpMultiplicandCrosswise packed( + TensorCoord const &extent) { + return ColumnMajorTensorOpMultiplicandCrosswise(extent.row()); + } + + /// Returns the offset of a coordinate in linear memory. 
+ /// Assumes coordinate has convention (contiguous, strided) + CUTLASS_HOST_DEVICE + LongIndex operator()(TensorCoord const &coord) const { + return layout_(PitchLinearCoord(coord.row(), coord.column())); + } + + /// Inverse of layout function, mapping linear offset to logical coordinate + CUTLASS_HOST_DEVICE + TensorCoord inverse(LongIndex offset) const { + PitchLinearCoord coord = layout_.inverse(offset); + return MatrixCoord(coord.contiguous(), coord.strided()); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { return layout_.stride(); } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride &stride() { return layout_.stride(); } + + /// Compute the number of contiguous elements needed to store a tensor with + /// the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(TensorCoord const &extent) const { + return layout_.capacity(PitchLinearCoord(extent.row(), extent.column())); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Template mapping a row-major view of pitch-linear memory to +/// TensorOpMultiplicandCrosswise +template +struct RowMajorTensorOpMultiplicandCrosswise { + /// Logical rank of tensor + static int const kRank = 2; + + /// Rank of stride vector + static int const kStrideRank = 1; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = MatrixCoord; + + /// Stride vector + using Stride = Coord; + + // + // Invariants + // + + using Base = TensorOpMultiplicandCrosswise; + + /// This layout is optimized for 128b accesses + static int const kAccessSize = Base::kAccessSize; + using TileShape = typename Base::TileShape; + using PartitionShape = typename Base::PartitionShape; + + // + // Static constants + // + + static int const kElementSize = Base::kElementSize; + static int const kElementsPerAccess = Base::kElementsPerAccess; + using PartitionCount = typename Base::PartitionCount; + using AccessCount = typename Base::AccessCount; + + private: + // + // Data members + // + + Base layout_; + + public: + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + RowMajorTensorOpMultiplicandCrosswise(Index ldm = 0) : layout_(ldm) {} + + /// Ctor + CUTLASS_HOST_DEVICE + RowMajorTensorOpMultiplicandCrosswise(Stride stride) : layout_(stride) {} + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static RowMajorTensorOpMultiplicandCrosswise packed( + TensorCoord const &extent) { + return RowMajorTensorOpMultiplicandCrosswise(extent.column()); + } + + /// Returns the offset of a coordinate in linear memory. 
+ /// Assumes coordinate has convention (contiguous, strided) + CUTLASS_HOST_DEVICE + LongIndex operator()(TensorCoord const &coord) const { + return layout_(PitchLinearCoord(coord.column(), coord.row())); + } + + /// Inverse of layout function, mapping linear offset to logical coordinate + CUTLASS_HOST_DEVICE + TensorCoord inverse(LongIndex offset) const { + PitchLinearCoord coord = layout_.inverse(offset); + return MatrixCoord(coord.strided(), coord.contiguous()); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { return layout_.stride(); } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride &stride() { return layout_.stride(); } + + /// Compute the number of contiguous elements needed to store a tensor with + /// the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(TensorCoord const &extent) const { + return layout_.capacity(PitchLinearCoord(extent.column(), extent.row())); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Template based on element size (in bits) - defined in terms of pitch-linear memory. +template +struct TensorOpMultiplicandColumnMajorInterleaved { + + /// Logical rank of tensor + static int const kRank = 2; + + /// Rank of stride vector + static int const kStrideRank = 1; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = PitchLinearCoord; + + /// Stride vector + using Stride = Coord; + + // + // Invariants + // + + /// This layout is optimized for 128b accesses + static int const kAccessSize = 128; + + // + // Static constants + // + + static int const kElementSize = ElementSize; + static int const kElementsPerAccess = kAccessSize / kElementSize; + + //static int const kThreadBlockStrided = ThreadBlockStrided; + static int const kInterleavedK = InterleavedK; + +private: + + // + // Data members + // + + /// Stride data member + Stride stride_; + +public: + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + TensorOpMultiplicandColumnMajorInterleaved(Index ldm = 0): stride_(ldm) { } + + /// Ctor + CUTLASS_HOST_DEVICE + TensorOpMultiplicandColumnMajorInterleaved(Stride stride): stride_(stride) { } + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static TensorOpMultiplicandColumnMajorInterleaved packed(TensorCoord const &extent) { + return TensorOpMultiplicandColumnMajorInterleaved(extent[0] * kInterleavedK); + } + + /// Returns the offset of a coordinate in linear memory. 
+ /// Assumes coordinate has convention (contiguous, strided) + CUTLASS_HOST_DEVICE + LongIndex operator()(TensorCoord const &coord) const { + int const rows_per_smem_cache_line = 128 / kInterleavedK; + + int row_id = coord.strided() / rows_per_smem_cache_line; + int col_id = (coord.strided() % rows_per_smem_cache_line) * kInterleavedK + coord.contiguous(); + + int access_block_id = col_id >> 4; + int swizzle_access_block_id = access_block_id ^ (row_id & 1); + + int swizzle_col_id = swizzle_access_block_id << 4; + + return row_id * 128 + swizzle_col_id; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { + return stride_; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride & stride() { + return stride_; + } + + /// Compute the number of contiguous elements needed to store a tensor with the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(TensorCoord const &extent) const { + return (extent[1] / kInterleavedK) * stride_[0]; + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Template based on element size (in bits) - defined in terms of pitch-linear memory. +template +struct TensorOpMultiplicandRowMajorInterleaved { + + /// Logical rank of tensor + static int const kRank = 2; + + /// Rank of stride vector + static int const kStrideRank = 1; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = PitchLinearCoord; + + /// Stride vector + using Stride = Coord; + + // + // Invariants + // + + /// This layout is optimized for 128b accesses + static int const kAccessSize = 128; + + // + // Static constants + // + + static int const kElementSize = ElementSize; + static int const kElementsPerAccess = kAccessSize / kElementSize; + + //static int const kThreadBlockStrided = ThreadBlockStrided; + static int const kInterleavedK = InterleavedK; + +private: + + // + // Data members + // + + /// Stride data member + Stride stride_; + +public: + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + TensorOpMultiplicandRowMajorInterleaved(Index ldm = 0): stride_(ldm) { } + + /// Ctor + CUTLASS_HOST_DEVICE + TensorOpMultiplicandRowMajorInterleaved(Stride stride): stride_(stride) { } + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static TensorOpMultiplicandRowMajorInterleaved packed(TensorCoord const &extent) { + return TensorOpMultiplicandRowMajorInterleaved(extent[1] * kInterleavedK); + } + + /// Returns the offset of a coordinate in linear memory. 
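Editorial aside (not part of the vendored header): the interleaved layouts above swizzle at the granularity of 16-element access blocks inside a 128-element shared memory line, XOR-ing the block index with the parity of the line. A host-side transcription, assuming kInterleavedK = 32 purely for illustration:

#include <cstdint>

int64_t interleaved_offset(int contiguous, int strided, int kInterleavedK = 32) {
  int rows_per_smem_cache_line = 128 / kInterleavedK;          // 4 rows when K = 32
  int row_id = strided / rows_per_smem_cache_line;             // which 128-element line
  int col_id = (strided % rows_per_smem_cache_line) * kInterleavedK + contiguous;

  int access_block_id = col_id >> 4;                           // 16-element access block
  int swizzle_access_block_id = access_block_id ^ (row_id & 1);// flip with line parity
  int swizzle_col_id = swizzle_access_block_id << 4;

  // As in the header above, the returned offset is block-granular: the low
  // four bits of col_id are not added back here.
  return int64_t(row_id) * 128 + swizzle_col_id;
}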
+ /// Assumes coordinate has convention (contiguous, strided) + CUTLASS_HOST_DEVICE + LongIndex operator()(TensorCoord const &coord) const { + int const rows_per_smem_cache_line = 128 / kInterleavedK; + + int row_id = coord.strided() / rows_per_smem_cache_line; + int col_id = (coord.strided() % rows_per_smem_cache_line) * kInterleavedK + coord.contiguous(); + + int access_block_id = col_id >> 4; + int swizzle_access_block_id = access_block_id ^ (row_id & 1); + + int swizzle_col_id = swizzle_access_block_id << 4; + + return row_id * 128 + swizzle_col_id; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { + return stride_; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride & stride() { + return stride_; + } + + /// Compute the number of contiguous elements needed to store a tensor with the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(TensorCoord const &extent) const { + return (extent[0] / kInterleavedK) * stride_[0]; + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace layout +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/layout/tensor_op_multiplicand_sm80.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/layout/tensor_op_multiplicand_sm80.h new file mode 100644 index 0000000000000000000000000000000000000000..f75c2a809097d66f2561fc35c2be1aa6b3c542e0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/layout/tensor_op_multiplicand_sm80.h @@ -0,0 +1,1139 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/*! \file + \brief layouts needed by Ampere fp64 tensor core kernels. +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/layout/pitch_linear.h" +#include "cutlass/layout/tensor_op_multiplicand_sm75.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace layout { + +//////////////////////////////////////////////////////////////////////////////// + +/// Template based on element size (in bits) - defined in terms of pitch-linear +/// memory and Crosswise size (in elements). +struct TensorOpMultiplicandCongruous64b { + /// Logical rank of tensor + static int const kRank = 2; + + /// Rank of stride vector + static int const kStrideRank = 1; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = PitchLinearCoord; + + /// Stride vector + using Stride = Coord; + + // + // Static constants + // + + static int const kElementSize = 64; + static int const kElementsPerAccess = 1; + + private: + + // + // Data members + // + + /// Stride data member. + Stride stride_; + + public: + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + TensorOpMultiplicandCongruous64b(Index ldm = 0) : stride_(ldm) {} + + /// Ctor + CUTLASS_HOST_DEVICE + TensorOpMultiplicandCongruous64b(Stride stride) : stride_(stride) {} + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static TensorOpMultiplicandCongruous64b packed(TensorCoord const &extent) { + return TensorOpMultiplicandCongruous64b(extent[0]); + } + + /// Returns the offset of a coordinate in linear memory. 
+ /// Assumes coordinate has convention (contiguous, strided) + CUTLASS_HOST_DEVICE + LongIndex operator()(TensorCoord const &coord) const { + + int tc = coord.contiguous() / 16; + int ts = coord.strided() / 4; + + int c = coord.contiguous() % 16; + int s = coord.strided() % 4; + + + int bank = ((((c & 1) * 4 + (c & 6) / 2)) ^ (s & 1)) * 2 + (c / 8); + int row = (c & 6) / 2; + + bank ^= ((s & 2) * 2); + + LongIndex offset = tc * 16 + bank + (ts * 4 + row) * stride_[0]; + + return offset; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { return stride_; } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride &stride() { return stride_; } + + /// Compute the number of contiguous elements needed to store a tensor with + /// the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(TensorCoord const &extent) const { + return extent[1] * stride_[0]; + } + + CUTLASS_HOST_DEVICE + TensorCoord inverse(LongIndex offset) const { + return TensorCoord(); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Template mapping a column-major view of pitch-linear memory to +/// TensorOpMultiplicand +struct ColumnMajorTensorOpMultiplicandCongruous64b { + + /// Logical rank of tensor + static int const kRank = 2; + + /// Rank of stride vector + static int const kStrideRank = 1; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = MatrixCoord; + + /// Stride vector + using Stride = Coord; + + // + // Invariants + // + + using Base = TensorOpMultiplicandCongruous64b; + +private: + + // + // Data members + // + + Base layout_; + +public: + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + ColumnMajorTensorOpMultiplicandCongruous64b(Index ldm = 0): layout_(ldm) { } + + /// Ctor + CUTLASS_HOST_DEVICE + ColumnMajorTensorOpMultiplicandCongruous64b(Stride stride): layout_(stride) { } + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static ColumnMajorTensorOpMultiplicandCongruous64b packed(TensorCoord const &extent) { + return ColumnMajorTensorOpMultiplicandCongruous64b(extent.row()); + } + + /// Returns the offset of a coordinate in linear memory. 
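Editorial aside (not part of the vendored header): a host-side transcription of the TensorOpMultiplicandCongruous64b mapping above. Each 64-bit element spans two 4-byte banks, and the bank/row shuffle permutes elements within a 16 (contiguous) x 4 (strided) element tile. The helper name is illustrative; `ldm` stands in for stride_[0].

#include <cstdint>

int64_t congruous_64b_offset(int contiguous, int strided, int64_t ldm) {
  int tc = contiguous / 16;
  int ts = strided / 4;
  int c  = contiguous % 16;
  int s  = strided % 4;

  int bank = ((((c & 1) * 4 + (c & 6) / 2)) ^ (s & 1)) * 2 + (c / 8);
  int row  = (c & 6) / 2;
  bank ^= ((s & 2) * 2);

  return tc * 16 + bank + (int64_t(ts) * 4 + row) * ldm;
}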
+ /// Assumes coordinate has convention (contiguous, strided) + CUTLASS_HOST_DEVICE + LongIndex operator()(TensorCoord const &coord) const { + return layout_(PitchLinearCoord(coord.row(), coord.column())); + } + + /// Inverse of layout function, mapping linear offset to logical coordinate + CUTLASS_HOST_DEVICE + TensorCoord inverse(LongIndex offset) const { + PitchLinearCoord coord = layout_.inverse(offset); + return MatrixCoord(coord.contiguous(), coord.strided()); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { + return layout_.stride(); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride & stride() { + return layout_.stride(); + } + + /// Compute the number of contiguous elements needed to store a tensor with the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(TensorCoord const &extent) const { + return layout_.capacity(PitchLinearCoord(extent.row(), extent.column())); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Template mapping a row-major view of pitch-linear memory to +/// TensorOpMultiplicand +struct RowMajorTensorOpMultiplicandCongruous64b { + + /// Logical rank of tensor + static int const kRank = 2; + + /// Rank of stride vector + static int const kStrideRank = 1; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = MatrixCoord; + + /// Stride vector + using Stride = Coord; + + // + // Invariants + // + + using Base = TensorOpMultiplicandCongruous64b; + +private: + + // + // Data members + // + + Base layout_; + +public: + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + RowMajorTensorOpMultiplicandCongruous64b(Index ldm = 0): layout_(ldm) { } + + /// Ctor + CUTLASS_HOST_DEVICE + RowMajorTensorOpMultiplicandCongruous64b(Stride stride): layout_(stride) { } + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static RowMajorTensorOpMultiplicandCongruous64b packed(TensorCoord const &extent) { + return RowMajorTensorOpMultiplicandCongruous64b(extent.column()); + } + + /// Returns the offset of a coordinate in linear memory. + /// Assumes coordinate has convention (contiguous, strided) + CUTLASS_HOST_DEVICE + LongIndex operator()(TensorCoord const &coord) const { + return layout_(PitchLinearCoord(coord.column(), coord.row())); + } + + /// Inverse of layout function, mapping linear offset to logical coordinate + CUTLASS_HOST_DEVICE + TensorCoord inverse(LongIndex offset) const { + PitchLinearCoord coord = layout_.inverse(offset); + return MatrixCoord(coord.strided(), coord.contiguous()); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { + return layout_.stride(); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride & stride() { + return layout_.stride(); + } + + /// Compute the number of contiguous elements needed to store a tensor with the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(TensorCoord const &extent) const { + return layout_.capacity(PitchLinearCoord(extent.column(), extent.row())); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Template based on element size (in bits) - defined in terms of pitch-linear +/// memory and Crosswise size (in elements). 
+struct TensorOpMultiplicand64bCrosswise { + /// Logical rank of tensor + static int const kRank = 2; + + /// Rank of stride vector + static int const kStrideRank = 1; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = PitchLinearCoord; + + /// Stride vector + using Stride = Coord; + + // + // Static constants + // + + static int const kElementSize = 64; + static int const kElementsPerAccess = 1; + + private: + + // + // Data members + // + + /// Stride data member. + Stride stride_; + + public: + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + TensorOpMultiplicand64bCrosswise(Index ldm = 0) : stride_(ldm) {} + + /// Ctor + CUTLASS_HOST_DEVICE + TensorOpMultiplicand64bCrosswise(Stride stride) : stride_(stride) {} + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static TensorOpMultiplicand64bCrosswise packed(TensorCoord const &extent) { + return TensorOpMultiplicand64bCrosswise(extent[0]); + } + + /// Returns the offset of a coordinate in linear memory. + /// Assumes coordinate has convention (contiguous, strided) + CUTLASS_HOST_DEVICE + LongIndex operator()(TensorCoord const &coord) const { + + int tc = coord.contiguous() / 16; + int ts = coord.strided() / 16; + + int c = coord.contiguous() % 16; + int s = coord.strided() % 16; + + int k_group = c / 4; + int access_s = s / 2; + + int row = access_s % 4; + int bank = ((k_group & 2) << 2) ^ ((s % 2) << 3) + (c % 4) * 2 + (access_s / 4) ^ (k_group & 1); + + int smem_row = (k_group * 4 + row) + tc * 16; + int smem_col = ts * 16 + bank; + + LongIndex offset = smem_row * stride_[0] + smem_col; + + return offset; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { return stride_; } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride &stride() { return stride_; } + + /// Compute the number of contiguous elements needed to store a tensor with + /// the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(TensorCoord const &extent) const { + return extent[1] * stride_[0]; + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Template based on element size (in bits) - defined in terms of pitch-linear +/// memory and Crosswise size (in elements). +struct ColumnMajorTensorOpMultiplicand64bCrosswise { + /// Logical rank of tensor + static int const kRank = 2; + + /// Rank of stride vector + static int const kStrideRank = 1; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = MatrixCoord; + + /// Stride vector + using Stride = Coord; + + // + // Invariants + // + + using Base = TensorOpMultiplicand64bCrosswise; + +private: + + // + // Data members + // + + Base layout_; + +public: + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + ColumnMajorTensorOpMultiplicand64bCrosswise(Index ldm = 0): layout_(ldm) { } + + /// Ctor + CUTLASS_HOST_DEVICE + ColumnMajorTensorOpMultiplicand64bCrosswise(Stride stride): layout_(stride) { } + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static ColumnMajorTensorOpMultiplicand64bCrosswise packed(TensorCoord const &extent) { + return ColumnMajorTensorOpMultiplicand64bCrosswise(extent.column()); + } + + /// Returns the offset of a coordinate in linear memory. 
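Editorial aside (not part of the vendored header): a host-side transcription of the TensorOpMultiplicand64bCrosswise mapping above. Note that in the original bank expression the additions bind tighter than the XORs under C++ precedence, so the sketch below writes the grouping explicitly; it is value-equivalent to the header's expression. The helper name is illustrative and `ldm` stands in for stride_[0].

#include <cstdint>

int64_t crosswise_64b_offset(int contiguous, int strided, int64_t ldm) {
  int tc = contiguous / 16;
  int ts = strided / 16;
  int c  = contiguous % 16;
  int s  = strided % 16;

  int k_group  = c / 4;
  int access_s = s / 2;
  int row      = access_s % 4;

  // Explicitly parenthesized form of:
  //   ((k_group & 2) << 2) ^ ((s % 2) << 3) + (c % 4) * 2 + (access_s / 4) ^ (k_group & 1)
  int bank = (((k_group & 2) << 2) ^
              (((s % 2) << 3) + (c % 4) * 2 + (access_s / 4))) ^
             (k_group & 1);

  int smem_row = (k_group * 4 + row) + tc * 16;
  int smem_col = ts * 16 + bank;

  return int64_t(smem_row) * ldm + smem_col;
}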
+ /// Assumes coordinate has convention (contiguous, strided) + CUTLASS_HOST_DEVICE + LongIndex operator()(TensorCoord const &coord) const { + return layout_(PitchLinearCoord(coord.row(), coord.column())); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { + return layout_.stride(); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride & stride() { + return layout_.stride(); + } + + /// Compute the number of contiguous elements needed to store a tensor with the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(TensorCoord const &extent) const { + return layout_.capacity(PitchLinearCoord(extent.row(), extent.column())); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Template based on element size (in bits) - defined in terms of pitch-linear +/// memory and Crosswise size (in elements). +struct RowMajorTensorOpMultiplicand64bCrosswise { + + /// Logical rank of tensor + static int const kRank = 2; + + /// Rank of stride vector + static int const kStrideRank = 1; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = MatrixCoord; + + /// Stride vector + using Stride = Coord; + + // + // Invariants + // + + using Base = TensorOpMultiplicand64bCrosswise; + +private: + + // + // Data members + // + + Base layout_; + +public: + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + RowMajorTensorOpMultiplicand64bCrosswise(Index ldm = 0): layout_(ldm) { } + + /// Ctor + CUTLASS_HOST_DEVICE + RowMajorTensorOpMultiplicand64bCrosswise(Stride stride): layout_(stride) { } + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static RowMajorTensorOpMultiplicand64bCrosswise packed(TensorCoord const &extent) { + return RowMajorTensorOpMultiplicand64bCrosswise(extent.row()); + } + + /// Returns the offset of a coordinate in linear memory. + /// Assumes coordinate has convention (contiguous, strided) + CUTLASS_HOST_DEVICE + LongIndex operator()(TensorCoord const &coord) const { + return layout_(PitchLinearCoord(coord.column(), coord.row())); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { + return layout_.stride(); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride & stride() { + return layout_.stride(); + } + + /// Compute the number of contiguous elements needed to store a tensor with the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(TensorCoord const &extent) const { + return layout_.capacity(PitchLinearCoord(extent.column(), extent.row())); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Template based on element size (in bits) - defined in terms of pitch-linear +/// memory and Crosswise size (in elements). +struct TensorOpMultiplicandCongruous128b { + /// Logical rank of tensor + static int const kRank = 2; + + /// Rank of stride vector + static int const kStrideRank = 1; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = PitchLinearCoord; + + /// Stride vector + using Stride = Coord; + + // + // Static constants + // + + static int const kElementSize = 128; + static int const kElementsPerAccess = 1; + + private: + + // + // Data members + // + + /// Stride data member. 
+ Stride stride_; + + public: + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + TensorOpMultiplicandCongruous128b(Index ldm = 0) : stride_(ldm) {} + + /// Ctor + CUTLASS_HOST_DEVICE + TensorOpMultiplicandCongruous128b(Stride stride) : stride_(stride) {} + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static TensorOpMultiplicandCongruous128b packed(TensorCoord const &extent) { + return TensorOpMultiplicandCongruous128b(extent[0]); + } + + /// Returns the offset of a coordinate in linear memory. + /// Assumes coordinate has convention (contiguous, strided) + CUTLASS_HOST_DEVICE + LongIndex operator()(TensorCoord const &coord) const { + + Index tc = coord.contiguous() / 8; + Index ts = coord.strided() / 4; + + Index c = coord.contiguous() % 8; + Index s = coord.strided() % 4; + + Index k_index = (c / 2); + + Index bank = (((c & 1) * 4) | (s ^ k_index)); + + LongIndex offset = tc * 8 + bank + (ts * 4 + k_index) * stride_[0]; + + return offset; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { return stride_; } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride &stride() { return stride_; } + + /// Compute the number of contiguous elements needed to store a tensor with + /// the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(TensorCoord const &extent) const { + return extent[1] * stride_[0]; + } + + /// Inverse of layout function, mapping linear offset to logical coordinate + CUTLASS_HOST_DEVICE + TensorCoord inverse(LongIndex offset) const { + return TensorCoord(); + } +}; + + +//////////////////////////////////////////////////////////////////////////////// + +/// Template mapping a column-major view of pitch-linear memory to +/// TensorOpMultiplicand +struct ColumnMajorTensorOpMultiplicandCongruous128b { + + /// Logical rank of tensor + static int const kRank = 2; + + /// Rank of stride vector + static int const kStrideRank = 1; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = MatrixCoord; + + /// Stride vector + using Stride = Coord; + + // + // Invariants + // + + using Base = TensorOpMultiplicandCongruous128b; + +private: + + // + // Data members + // + + Base layout_; + +public: + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + ColumnMajorTensorOpMultiplicandCongruous128b(Index ldm = 0): layout_(ldm) { } + + /// Ctor + CUTLASS_HOST_DEVICE + ColumnMajorTensorOpMultiplicandCongruous128b(Stride stride): layout_(stride) { } + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static ColumnMajorTensorOpMultiplicandCongruous128b packed(TensorCoord const &extent) { + return ColumnMajorTensorOpMultiplicandCongruous128b(extent.row()); + } + + /// Returns the offset of a coordinate in linear memory. 
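Editorial aside (not part of the vendored header): a host-side transcription of the TensorOpMultiplicandCongruous128b mapping above. A 128-bit element fills four 4-byte banks, and the layout permutes an 8 (contiguous) x 4 (strided) element tile with a small XOR on the strided index. The helper name is illustrative; `ldm` stands in for stride_[0].

#include <cstdint>

int64_t congruous_128b_offset(int contiguous, int strided, int64_t ldm) {
  int tc = contiguous / 8;
  int ts = strided / 4;
  int c  = contiguous % 8;
  int s  = strided % 4;

  int k_index = c / 2;
  int bank    = ((c & 1) * 4) | (s ^ k_index);

  return tc * 8 + bank + (int64_t(ts) * 4 + k_index) * ldm;
}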
+ /// Assumes coordinate has convention (contiguous, strided) + CUTLASS_HOST_DEVICE + LongIndex operator()(TensorCoord const &coord) const { + return layout_(PitchLinearCoord(coord.row(), coord.column())); + } + + /// Inverse of layout function, mapping linear offset to logical coordinate + CUTLASS_HOST_DEVICE + TensorCoord inverse(LongIndex offset) const { + PitchLinearCoord coord = layout_.inverse(offset); + return MatrixCoord(coord.contiguous(), coord.strided()); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { + return layout_.stride(); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride & stride() { + return layout_.stride(); + } + + /// Compute the number of contiguous elements needed to store a tensor with the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(TensorCoord const &extent) const { + return layout_.capacity(PitchLinearCoord(extent.row(), extent.column())); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Template mapping a row-major view of pitch-linear memory to +/// TensorOpMultiplicand +struct RowMajorTensorOpMultiplicandCongruous128b { + + /// Logical rank of tensor + static int const kRank = 2; + + /// Rank of stride vector + static int const kStrideRank = 1; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = MatrixCoord; + + /// Stride vector + using Stride = Coord; + + // + // Invariants + // + + using Base = TensorOpMultiplicandCongruous128b; + +private: + + // + // Data members + // + + Base layout_; + +public: + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + RowMajorTensorOpMultiplicandCongruous128b(Index ldm = 0): layout_(ldm) { } + + /// Ctor + CUTLASS_HOST_DEVICE + RowMajorTensorOpMultiplicandCongruous128b(Stride stride): layout_(stride) { } + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static RowMajorTensorOpMultiplicandCongruous128b packed(TensorCoord const &extent) { + return RowMajorTensorOpMultiplicandCongruous128b(extent.column()); + } + + /// Returns the offset of a coordinate in linear memory. + /// Assumes coordinate has convention (contiguous, strided) + CUTLASS_HOST_DEVICE + LongIndex operator()(TensorCoord const &coord) const { + return layout_(PitchLinearCoord(coord.column(), coord.row())); + } + + /// Inverse of layout function, mapping linear offset to logical coordinate + CUTLASS_HOST_DEVICE + TensorCoord inverse(LongIndex offset) const { + PitchLinearCoord coord = layout_.inverse(offset); + return MatrixCoord(coord.strided(), coord.contiguous()); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { + return layout_.stride(); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride & stride() { + return layout_.stride(); + } + + /// Compute the number of contiguous elements needed to store a tensor with the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(TensorCoord const &extent) const { + return layout_.capacity(PitchLinearCoord(extent.column(), extent.row())); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Template based on element size (in bits) - defined in terms of pitch-linear +/// memory and Crosswise size (in elements). 
+struct TensorOpMultiplicandCrosswise128x4 { + /// Logical rank of tensor + static int const kRank = 2; + + /// Rank of stride vector + static int const kStrideRank = 1; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = PitchLinearCoord; + + /// Stride vector + using Stride = Coord; + + // + // Static constants + // + + static int const kElementSize = 128; + static int const kElementsPerAccess = 1; + + private: + + // + // Data members + // + + /// Stride data member. + Stride stride_; + + public: + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + TensorOpMultiplicandCrosswise128x4(Index ldm = 0) : stride_(ldm) {} + + /// Ctor + CUTLASS_HOST_DEVICE + TensorOpMultiplicandCrosswise128x4(Stride stride) : stride_(stride) {} + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static TensorOpMultiplicandCrosswise128x4 packed(TensorCoord const &extent) { + return TensorOpMultiplicandCrosswise128x4(extent[0]); + } + + /// Returns the offset of a coordinate in linear memory. + /// Assumes coordinate has convention (contiguous, strided) + CUTLASS_HOST_DEVICE + LongIndex operator()(TensorCoord const &coord) const { + + Index tc = coord.contiguous() / 8; + Index ts = coord.strided() / 8; + + Index c = coord.contiguous() % 8; + Index s = coord.strided() % 8; + + Index liq = c % 4; + + Index bank = liq + ((s & 1) * 4) ^ (c & 4); + + Index k_index = (c & 4) + (s / 4) * 2 + ((s & 2) / 2); + + LongIndex offset = (tc * 8 + k_index) * stride_[0] + ts * 8 + bank; + + return offset; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { return stride_; } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride &stride() { return stride_; } + + /// Compute the number of contiguous elements needed to store a tensor with + /// the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(TensorCoord const &extent) const { + return extent[1] * stride_[0]; + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Template mapping a column-major view of pitch-linear memory to +/// TensorOpMultiplicand +struct ColumnMajorTensorOpMultiplicandCrosswise128x4 { + + /// Logical rank of tensor + static int const kRank = 2; + + /// Rank of stride vector + static int const kStrideRank = 1; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = MatrixCoord; + + /// Stride vector + using Stride = Coord; + + // + // Invariants + // + + using Base = TensorOpMultiplicandCrosswise128x4; + +private: + + // + // Data members + // + + Base layout_; + +public: + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + ColumnMajorTensorOpMultiplicandCrosswise128x4(Index ldm = 0): layout_(ldm) { } + + /// Ctor + CUTLASS_HOST_DEVICE + ColumnMajorTensorOpMultiplicandCrosswise128x4(Stride stride): layout_(stride) { } + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static ColumnMajorTensorOpMultiplicandCrosswise128x4 packed(TensorCoord const &extent) { + return ColumnMajorTensorOpMultiplicandCrosswise128x4(extent.column()); + } + + /// Returns the offset of a coordinate in linear memory. 
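Editorial aside (not part of the vendored header): a host-side transcription of the TensorOpMultiplicandCrosswise128x4 mapping above. As with the 64-bit crosswise layout, the addition in the original bank expression binds tighter than the XOR, so the grouping is written out explicitly below; the result is value-equivalent. The helper name is illustrative and `ldm` stands in for stride_[0].

#include <cstdint>

int64_t crosswise_128x4_offset(int contiguous, int strided, int64_t ldm) {
  int tc = contiguous / 8;
  int ts = strided / 8;
  int c  = contiguous % 8;
  int s  = strided % 8;

  int liq = c % 4;

  // Explicitly parenthesized form of:  liq + ((s & 1) * 4) ^ (c & 4)
  int bank = (liq + ((s & 1) * 4)) ^ (c & 4);

  int k_index = (c & 4) + (s / 4) * 2 + ((s & 2) / 2);

  return (int64_t(tc) * 8 + k_index) * ldm + ts * 8 + bank;
}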
+ /// Assumes coordinate has convention (contiguous, strided) + CUTLASS_HOST_DEVICE + LongIndex operator()(TensorCoord const &coord) const { + return layout_(PitchLinearCoord(coord.row(), coord.column())); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { + return layout_.stride(); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride & stride() { + return layout_.stride(); + } + + /// Compute the number of contiguous elements needed to store a tensor with the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(TensorCoord const &extent) const { + return layout_.capacity(PitchLinearCoord(extent.row(), extent.column())); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Template mapping a row-major view of pitch-linear memory to +/// TensorOpMultiplicand +struct RowMajorTensorOpMultiplicandCrosswise128x4 { + + /// Logical rank of tensor + static int const kRank = 2; + + /// Rank of stride vector + static int const kStrideRank = 1; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = MatrixCoord; + + /// Stride vector + using Stride = Coord; + + // + // Invariants + // + + using Base = TensorOpMultiplicandCrosswise128x4; + +private: + + // + // Data members + // + + Base layout_; + +public: + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + RowMajorTensorOpMultiplicandCrosswise128x4(Index ldm = 0): layout_(ldm) { } + + /// Ctor + CUTLASS_HOST_DEVICE + RowMajorTensorOpMultiplicandCrosswise128x4(Stride stride): layout_(stride) { } + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static RowMajorTensorOpMultiplicandCrosswise128x4 packed(TensorCoord const &extent) { + return RowMajorTensorOpMultiplicandCrosswise128x4(extent.row()); + } + + /// Returns the offset of a coordinate in linear memory. + /// Assumes coordinate has convention (contiguous, strided) + CUTLASS_HOST_DEVICE + LongIndex operator()(TensorCoord const &coord) const { + return layout_(PitchLinearCoord(coord.column(), coord.row())); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { + return layout_.stride(); + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride & stride() { + return layout_.stride(); + } + + /// Compute the number of contiguous elements needed to store a tensor with the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(TensorCoord const &extent) const { + return layout_.capacity(PitchLinearCoord(extent.column(), extent.row())); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace layout +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/layout/vector.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/layout/vector.h new file mode 100644 index 0000000000000000000000000000000000000000..188dfdcefbc013049c09f8caae4dfa590490d2aa --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/layout/vector.h @@ -0,0 +1,105 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Defines layout functions used for rank=1 vectors. +*/ +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/coord.h" + +namespace cutlass { +namespace layout { + +/// Tensor layout for densely packed vectors. 
+class PackedVectorLayout { +public: + /// Logical rank of tensor + static int const kRank = 1; + + /// Rank of stride vector + static int const kStrideRank = 1; + + /// Index type used for coordinates + using Index = int32_t; + + /// Long index type used for offsets + using LongIndex = int64_t; + + /// Logical coordinate + using TensorCoord = Coord; + + /// Stride vector + using Stride = Coord; + +private: + + // + // No actual stride vector stored + // + +public: + + // + // Methods + // + + CUTLASS_HOST_DEVICE + PackedVectorLayout() { } + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static PackedVectorLayout packed(TensorCoord const &size) { + CUTLASS_UNUSED(size); + return PackedVectorLayout(); + } + + /// Returns the offset of a coordinate in linear memory + CUTLASS_HOST_DEVICE + LongIndex operator()(TensorCoord const &coord) const { + return coord[0]; + } + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const { + return make_Coord(1); + } + + /// Compute the number of contiguous elements needed to store a tensor with the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(TensorCoord const &size) const { + return size[0]; + } +}; + +} // namespace layout +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/pipeline/pipeline.hpp b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/pipeline/pipeline.hpp new file mode 100644 index 0000000000000000000000000000000000000000..246e6fa47f2e10a5c5806b78ac45d41a2ac85d93 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/pipeline/pipeline.hpp @@ -0,0 +1,36 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +#pragma once + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +#include "cutlass/pipeline/sm90_pipeline.hpp" +//////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/pipeline/sm90_pipeline.hpp b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/pipeline/sm90_pipeline.hpp new file mode 100644 index 0000000000000000000000000000000000000000..f0632830bcfb3245372b5fe3b6e171507c98690c --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/pipeline/sm90_pipeline.hpp @@ -0,0 +1,1102 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/detail/dependent_false.hpp" +#include "cute/numeric/integral_constant.hpp" +#include "cute/arch/cluster_sm90.hpp" +#include "cutlass/arch/barrier.h" +#include "cute/util/type_traits.hpp" +#include "cute/container/array.hpp" + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +using namespace cute; + +enum class BarrierStatus : uint32_t { + WaitAgain = 0u, + WaitDone = 1u +}; + +class ArrivalToken { +public: + CUTLASS_HOST_DEVICE + ArrivalToken(BarrierStatus barrier_status) : barrier_status_(barrier_status) {} + + CUTLASS_HOST_DEVICE + ArrivalToken() = delete; + + CUTLASS_HOST_DEVICE + BarrierStatus get() const { + return barrier_status_;; + } + + CUTLASS_HOST_DEVICE + bool operator==(ArrivalToken const& other) const { + return barrier_status_ == other.get(); + } + +private: + BarrierStatus barrier_status_; + + CUTLASS_HOST_DEVICE + friend bool operator==(const ArrivalToken& left, const BarrierStatus& right) { + return left.get() == right; + } + + CUTLASS_HOST_DEVICE + friend bool operator==(const BarrierStatus& left, const ArrivalToken& right) { + return left == right.get(); + } +}; + +class ProducerToken : public ArrivalToken { + using ArrivalToken::ArrivalToken; +}; + +class ConsumerToken : public ArrivalToken { + using ArrivalToken::ArrivalToken; +}; + +// Circular Buffer Index + Associated Phase +// Assumes only one operation possible - i.e., ++ +template +struct PipelineState { + + static constexpr uint32_t Stages = Stages_; + + int index_ = 0; + uint32_t phase_ = 0; + uint32_t count_ = 0; + + CUTLASS_DEVICE + PipelineState(): index_{}, phase_{}, count_{} {} + + CUTLASS_DEVICE + PipelineState(int index, uint32_t phase, uint32_t count) + : index_(index) + , phase_(phase) + , count_(count) {} + + CUTLASS_DEVICE + int index() const { + return index_; + } + + CUTLASS_DEVICE + uint32_t phase() const { + return phase_; + } + + CUTLASS_DEVICE + uint32_t count() const { + return count_; + } + + CUTLASS_DEVICE + void operator++() { + if constexpr (Stages > 0) { + ++index_; + ++count_; + if (index_ == Stages) { + index_ = 0; + phase_ ^= 1; + } + } + } + + CUTLASS_DEVICE + PipelineState& operator=(const PipelineState& other) { + index_ = other.index(); + phase_ = other.phase(); + count_ = other.count(); + return *this; + } + + CUTLASS_DEVICE + PipelineState advance(uint32_t num_iterations) { + if constexpr (Stages > 0) { + // Number of iterations cross over the stage boundary => flipped phase + if ((num_iterations < Stages) && (index_ + num_iterations) >= Stages ) { + phase_ ^= 1; + } + // How many times number of iterations cross over the stage boundary and + // end up on a odd number => flipped phase + if ((num_iterations >= Stages) && (((index_ + num_iterations) / Stages) % 2) == 1) { + phase_ ^= 1; + } + index_ = (index_ + num_iterations) % Stages; + count_ += num_iterations; + } + return *this; + } + + CUTLASS_DEVICE + static PipelineState make_pipeline_state(PipelineState start_state, uint32_t num_iterations) { + return start_state.advance(num_iterations); + } +}; + +template +CUTLASS_DEVICE +PipelineState make_producer_start_state() { + // Producer starts with an opposite phase as the buffers are initially empty + constexpr int 
InitialProducerStage = 0; + constexpr uint32_t InitialProducerPhase = 1; + constexpr uint32_t InitialProducerCount = 0; + return {InitialProducerStage, InitialProducerPhase, InitialProducerCount}; +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// +// +// TMA load (producer) Async Pipeline class +// +/////////////////////////////////////////////////////////////////////////////////////////////////// +// Assumptions : Constructor is visible Cluster-wide (as it needs a Cluster-Sync) +// We have exactly one thread elected in the Producer as the "leader" +// Currently, it is optional to elect a leader for the Consumers +template < + int Stages_, + class ClusterShape_ +> +class PipelineTmaAsync { +public : + using ClusterShape = ClusterShape_; + using FullBarrier = cutlass::arch::ClusterTransactionBarrier; + using EmptyBarrier = cutlass::arch::ClusterBarrier; + using ProducerBarrierType = FullBarrier::ValueType; + using ConsumerBarrierType = EmptyBarrier::ValueType; + static constexpr uint32_t Stages = Stages_; + using PipelineState = cutlass::PipelineState; + + struct SharedStorage { + FullBarrier full_barrier_[Stages]; + EmptyBarrier empty_barrier_[Stages]; + }; + + enum class ThreadCategory { + NonParticipant, + Producer, + Consumer, + ProducerConsumer + }; + + struct Params { + uint32_t transaction_bytes = 0; + ThreadCategory role = ThreadCategory::NonParticipant; + uint32_t is_leader = 0; + uint32_t num_consumers = 0; + }; + + // Constructor + CUTLASS_DEVICE + PipelineTmaAsync(SharedStorage& storage, Params params) + : params_(params) + , full_barrier_ptr_(&storage.full_barrier_[0]) + , empty_barrier_ptr_(&storage.empty_barrier_[0]) { + + int warp_idx = canonical_warp_idx(); + int lane_predicate = cute::elect_one_sync(); + auto cluster_shape = ClusterShape{}; + if (warp_idx == 0 && lane_predicate == 1) { + // Barrier FULL init + for (int i = 0; i < Stages; ++i) { + full_barrier_ptr_[i].init(1); + } + uint32_t const num_consumer_warpgroups_per_cluster = params_.num_consumers / NumThreadsPerWarpGroup; + uint32_t const multicast_consumer_arrival_count = (cute::size<0>(cluster_shape) + cute::size<1>(cluster_shape) - 1) * + num_consumer_warpgroups_per_cluster; + // Barrier EMPTY init + for (int i = 0; i < Stages; ++i) { + empty_barrier_ptr_[i].init(multicast_consumer_arrival_count); + } + } + // Logic to optimally schedule Empty Arrives + // Goal : To divide SYNCS Empty Arrival duty equally amongst the Warp-Group (128 threads) + dim3 block_id = cute::block_id_in_cluster(); + auto cluster_size = cute::size(cluster_shape); + static constexpr int MaxClusterSize = 16; + static_assert(cluster_size <= MaxClusterSize, "ERROR : Cluster size too large !" 
); + + // STEP 1 : Use Cute Layout function to generate an optimal dst block-id (0-15) + if (params_.num_consumers % NumThreadsPerWarpGroup == 0) { + int thread_idx = threadIdx.x % NumThreadsPerWarpGroup; + is_signalling_thread_ = (thread_idx % (NumThreadsPerWarpGroup / MaxClusterSize)) == 0; + auto layout = cute::composition(Swizzle<2,0,-2>{}, + Layout,Stride<_4,_1>>{}); + uint32_t thread_row = warp_idx % 4; + uint32_t thread_col = (thread_idx / 8) % 4; + dst_blockid_ = layout(thread_row, thread_col); + } + else if (params_.num_consumers == 32) { + int thread_idx = threadIdx.x % 32; + is_signalling_thread_ = (thread_idx % (32 / MaxClusterSize)) == 0; + auto layout = Layout,Stride<_4, _1>>{}; + uint32_t thread_row = thread_idx / 8; + uint32_t thread_col = (thread_idx % 8) / 2; + dst_blockid_ = layout(thread_row, thread_col); + } + else { + is_signalling_thread_ = 0; + #ifndef NDEBUG + asm volatile ("brkpt;\n" ::); + #endif + } + + // STEP 2: Find if this dst block-id needs an arrival for this problem + is_signalling_thread_ &= dst_blockid_ < cluster_size; + is_signalling_thread_ &= is_same_row_or_col(dst_blockid_, block_id, cluster_shape); + + cutlass::arch::fence_barrier_init(); + } + + CUTLASS_DEVICE + bool is_same_row_or_col(int dst_block_id, dim3 block_id, ClusterShape cluster_shape) { + return (((dst_block_id % cute::size<0>(cluster_shape)) == block_id.x) || + ( + ((dst_block_id / cute::size<0>(cluster_shape)) == block_id.y) + )); + } + + //////////////////// + // Producer APIs + //////////////////// + // Four member functions are always used in pairs: + // + // * producer_try_acquire and producer_acquire, and + // * consumer_try_wait and consumer_wait. + // + // The two functions with "try" in their names are called "try" functions, + // and the other two are conceptually "finalize" functions. + // The "try" function in each pair starts the process of waiting on the barrier to flip. + // It opportunistically waits for an implementation-dependent timeout. + // Whether or not the barrier has flipped yet, the try function will return a token. + // If the token indicates that the barrier has not flipped, + // then the token must be passed into the corresponding "finalize" function. + // The finalize function will then block until the barrier has flipped. + // If the token indicates that the barrier _has_ flipped, + // then it is still correct to pass it into the finalize function. + // The finalize function will return immediately in that case. + + CUTLASS_DEVICE + ProducerToken producer_try_acquire(PipelineState state, uint32_t skip_wait = false) { + return producer_try_acquire(state.index(), state.phase(), skip_wait); + } + + CUTLASS_DEVICE + void producer_acquire(PipelineState state, ProducerToken barrier_token = {BarrierStatus::WaitAgain}) { + producer_acquire(state.index(), state.phase(), barrier_token); + } + + CUTLASS_DEVICE + void producer_commit(PipelineState state, uint32_t bytes) { + producer_commit(state.index(), bytes); + } + + // Prevents early exit of producer blocks in Cluster. + // This should be called once before kernel exits. 
+ CUTLASS_DEVICE + void producer_tail(PipelineState state) { + for (int count = 0; count < Stages; ++count) { + producer_acquire(state); + ++state; + } + } + + CUTLASS_DEVICE + ProducerBarrierType* producer_get_barrier(PipelineState state) { + return producer_get_barrier(state.index()); + } + + //////////////////// + // Consumer APIs + //////////////////// + CUTLASS_DEVICE + ConsumerToken consumer_try_wait(PipelineState state, uint32_t skip_wait = false) { + return consumer_try_wait(state.index(), state.phase(), skip_wait); + } + + CUTLASS_DEVICE + ConsumerToken consumer_test_wait(PipelineState state, uint32_t skip_wait = false) { + return consumer_test_wait(state.index(), state.phase(), skip_wait); + } + + CUTLASS_DEVICE + void consumer_wait(PipelineState state) { + consumer_wait(state.index(), state.phase()); + } + + CUTLASS_DEVICE + void consumer_wait(PipelineState state, ConsumerToken barrier_token) { + consumer_wait(state.index(), state.phase(), barrier_token); + } + + CUTLASS_DEVICE + void consumer_release(PipelineState state) { + consumer_release(state.index()); + } + +private : + uint32_t dst_blockid_ = 0; + uint32_t is_signalling_thread_ = 0; + FullBarrier *full_barrier_ptr_ = nullptr; + EmptyBarrier *empty_barrier_ptr_ = nullptr; + Params params_; + + CUTLASS_DEVICE + ProducerToken producer_try_acquire(uint32_t stage, uint32_t phase, uint32_t skip_wait) { + if (skip_wait) { + return {BarrierStatus::WaitDone}; + } + uint32_t barrier_status = empty_barrier_ptr_[stage].try_wait(phase); + return {static_cast(barrier_status)}; + } + + CUTLASS_DEVICE + void producer_acquire(uint32_t stage, uint32_t phase, ProducerToken barrier_token) { + if (barrier_token == BarrierStatus::WaitAgain) { + empty_barrier_ptr_[stage].wait(phase); + } + + if (params_.is_leader) { + full_barrier_ptr_[stage].arrive_and_expect_tx(params_.transaction_bytes); + } + #ifndef NDEBUG + if (params_.role == ThreadCategory::Consumer || params_.role == ThreadCategory::NonParticipant) { + asm volatile ("brkpt;\n" ::); + } + + // Most likely you have elected more than one leader + if (params_.is_leader && (threadIdx.x % 32 != 0)) { + asm volatile ("brkpt;\n" ::); + } + #endif + } + + // NOP for TMA based mainloop + CUTLASS_DEVICE + void producer_commit(uint32_t stage, uint32_t bytes) { + // Below code is used only for unit-testing (in the absence of TMA commit) + #if CUTLASS_UNIT_TEST_PIPELINE + if (params_.is_leader) { + // STEP 1 : Commit to self + full_barrier_ptr_[stage].complete_transaction(bytes); + + // STEP 2 : Commit to other blocks in our cluster + auto cluster_shape = ClusterShape{}; + Layout block_layout_in_cluster = make_layout(cluster_shape); + dim3 local_block_id = cute::block_id_in_cluster(); + + CUTLASS_PRAGMA_UNROLL + for(int n = 0; n < size<1>(block_layout_in_cluster); ++n) { + uint32_t dst_block_id = block_layout_in_cluster(local_block_id.x,n,Int<0>{}); + full_barrier_ptr_[stage].complete_transaction(dst_block_id, bytes, n!=local_block_id.y); + } + + CUTLASS_PRAGMA_UNROLL + for(int m = 0; m < size<0>(block_layout_in_cluster); ++m) { + uint32_t dst_block_id = block_layout_in_cluster(m,local_block_id.y,Int<0>{}); + full_barrier_ptr_[stage].complete_transaction(dst_block_id, bytes, m!=local_block_id.x); + } + } + #endif + } + + CUTLASS_DEVICE + ConsumerToken consumer_try_wait(uint32_t stage, uint32_t phase, uint32_t skip_wait) { + if (skip_wait) { + return {BarrierStatus::WaitDone}; + } + uint32_t barrier_status = full_barrier_ptr_[stage].try_wait(phase); + return {static_cast(barrier_status)}; + } + + 
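+  // --- Illustrative usage sketch (reviewer note; not part of the original header) -------------
+  // The "try"/"finalize" pairs documented above are typically driven from a kernel roughly as in
+  // the hypothetical producer/consumer loops below. `pipeline`, `smem_pipe_write`,
+  // `smem_pipe_read`, `Pipeline`, and `k_tile_count` are placeholder names for this sketch only;
+  // they are not defined by this file.
+  //
+  //   // Producer warp (issues TMA loads); the buffers start empty, so the producer state begins
+  //   // with the opposite phase via make_producer_start_state.
+  //   auto smem_pipe_write = cutlass::make_producer_start_state<Pipeline>();
+  //   for (int k = 0; k < k_tile_count; ++k) {
+  //     ProducerToken token = pipeline.producer_try_acquire(smem_pipe_write);  // start the wait
+  //     pipeline.producer_acquire(smem_pipe_write, token);                     // block if needed
+  //     // issue the TMA copy against pipeline.producer_get_barrier(smem_pipe_write);
+  //     // for this TMA pipeline the commit is performed by the TMA unit itself
+  //     ++smem_pipe_write;                                                     // next stage/phase
+  //   }
+  //   pipeline.producer_tail(smem_pipe_write);  // drain all stages before the block may exit
+  //
+  //   // Consumer warp group (e.g. runs MMAs on the staged buffers):
+  //   PipelineState smem_pipe_read;
+  //   for (int k = 0; k < k_tile_count; ++k) {
+  //     ConsumerToken token = pipeline.consumer_try_wait(smem_pipe_read);
+  //     pipeline.consumer_wait(smem_pipe_read, token);   // stage's data is now visible
+  //     // consume the stage's shared-memory buffer
+  //     pipeline.consumer_release(smem_pipe_read);       // tell the producer the stage is empty
+  //     ++smem_pipe_read;
+  //   }
+  // ---------------------------------------------------------------------------------------------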
CUTLASS_DEVICE + ConsumerToken consumer_test_wait(uint32_t stage, uint32_t phase, uint32_t skip_wait) { + if (skip_wait) { + return {BarrierStatus::WaitDone}; + } + uint32_t barrier_status = full_barrier_ptr_[stage].test_wait(phase); + return {static_cast(barrier_status)}; + } + + // Wait for producer to commit transactions (done by TMA) + CUTLASS_DEVICE + void consumer_wait(uint32_t stage, uint32_t phase) { + full_barrier_ptr_[stage].wait(phase); + } + + // Wait for producer to commit transactions (done by TMA) + CUTLASS_DEVICE + void consumer_wait(uint32_t stage, uint32_t phase, ConsumerToken barrier_token) { + if (barrier_token == BarrierStatus::WaitAgain) { + full_barrier_ptr_[stage].wait(phase); + } + } + + // Consumer signalling Producer of completion + // Ensures all blocks in the Same Row and Column get notifed. + CUTLASS_DEVICE + void consumer_release(uint32_t stage, uint32_t skip = false) { + empty_barrier_ptr_[stage].arrive(dst_blockid_, is_signalling_thread_ & (!skip)); + #ifndef NDEBUG + if (params_.role == ThreadCategory::Producer || params_.role == ThreadCategory::NonParticipant) { + asm volatile ("brkpt;\n" ::); + } + #endif + } + + CUTLASS_DEVICE + ProducerBarrierType* producer_get_barrier(uint32_t stage) { + return reinterpret_cast(&full_barrier_ptr_[stage]); + } +}; + +/////////////////////////////////////////////////////////////////////////////////////////////////// +// +// TMA store pipeline class +// producer-only class, no async barriers between threads because consumer is TMA unit +// +/////////////////////////////////////////////////////////////////////////////////////////////////// +template < + int Stages_, + // The number of committed TMA store batches that can be in flight upon return of producer acquire + int UnacquiredStages_ = Stages_-1 +> +class PipelineTmaStore { +public: + static constexpr uint32_t Stages = Stages_; + static_assert(Stages_ > 0); + static_assert(UnacquiredStages_ >= 0); + static constexpr uint32_t UnacquiredStages = static_cast(UnacquiredStages_); + using PipelineState = cutlass::PipelineState; + + struct Params { + bool always_wait = false; + }; + + CUTLASS_DEVICE + PipelineTmaStore(Params params = {}) : params_(params) {} + + //////////////////// + // Producer APIs + //////////////////// + // Wait for the least recently committed batch of TMA stores to complete + CUTLASS_DEVICE + void producer_acquire(PipelineState state) { + producer_acquire(state.index(), state.count()); + } + + // Commit the most recently issued batch of TMA stores + CUTLASS_DEVICE + void producer_commit(PipelineState state) { + producer_commit(state.index(), state.count()); + } + + // Wait for all TMA stores to complete + CUTLASS_DEVICE + void producer_tail([[maybe_unused]] PipelineState state) { + tma_store_wait<0>(); + } + +private: + Params params_; + + // Wait for the least recently committed batch of TMA stores to complete + // or until at most UnacquiredStages TMA store batches are in-flight (if specified) + CUTLASS_DEVICE + void producer_acquire([[maybe_unused]] uint32_t stage, uint32_t count) { + if (params_.always_wait || count > UnacquiredStages) { + tma_store_wait(); + } + } + + // Commit the most recently issued batch of TMA stores + CUTLASS_DEVICE + void producer_commit([[maybe_unused]] uint32_t stage, [[maybe_unused]] uint32_t count) { + tma_store_arrive(); + } +}; + +template <> +class PipelineTmaStore< /* Stages_ = */ 0, /* UnacquiredStages = Stages_ - 1 = */ -1 > { +public: + static constexpr uint32_t Stages = 0; + static constexpr uint32_t 
UnacquiredStages = 0; + using PipelineState = cutlass::PipelineState; + + struct Params { + bool always_wait = false; + }; + + PipelineTmaStore() = default; + CUTLASS_DEVICE + PipelineTmaStore(Params params) : params_(params) {} + + //////////////////// + // Producer APIs + //////////////////// + + template + CUTLASS_DEVICE + void producer_acquire(PipelineState /* state */, + ThisTemplateParameterExistsOnlyForDependentFalse* /* unused */ = nullptr) { + static_assert(cutlass::detail::dependent_false, + "It is never valid to call PipelineTmaStore<0>::producer_acquire"); + } + + // Commit the most recently issued batch of TMA stores + CUTLASS_DEVICE + void producer_commit(PipelineState state) { + producer_commit(state.index(), state.count()); + } + + // Wait for all TMA stores to complete + CUTLASS_DEVICE + void producer_tail([[maybe_unused]] PipelineState state) { + tma_store_wait<0>(); + } + +private: + Params params_; + + // Commit the most recently issued batch of TMA stores + CUTLASS_DEVICE + void producer_commit([[maybe_unused]] uint32_t stage, [[maybe_unused]] uint32_t count) { + tma_store_arrive(); + } +}; + + +/////////////////////////////////////////////////////////////////////////////////////////////////// +// +// Simple producer-consumer async Pipeline class using producer transaction barriers +// +/////////////////////////////////////////////////////////////////////////////////////////////////// +template +class PipelineTransactionAsync { +public : + using FullBarrier = cutlass::arch::ClusterTransactionBarrier; + using EmptyBarrier = cutlass::arch::ClusterBarrier; + using ProducerBarrierType = FullBarrier::ValueType; + using ConsumerBarrierType = EmptyBarrier::ValueType; + static constexpr uint32_t Stages = Stages_; + using PipelineState = cutlass::PipelineState; + + struct SharedStorage { + cute::array full_barrier_; + cute::array empty_barrier_; + }; + + enum class ThreadCategory { + NonParticipant, + Producer, + Consumer, + ProducerConsumer + }; + + struct Params { + ThreadCategory role = ThreadCategory::NonParticipant; + uint32_t transaction_bytes = 0; + uint32_t producer_arv_count = 1; + uint32_t consumer_arv_count = 1; + uint32_t dst_blockid = cute::block_rank_in_cluster(); + }; + + // Constructor + CUTLASS_DEVICE + PipelineTransactionAsync(SharedStorage& storage, Params const& params) + : params_(params) + , full_barrier_ptr_(storage.full_barrier_.data()) + , empty_barrier_ptr_(storage.empty_barrier_.data()) { + + int warp_idx = canonical_warp_idx(); + int lane_predicate = cute::elect_one_sync(); + + // Barrier FULL, EMPTY init + // Init is done only by thread 0 of the block + if (warp_idx == 0 && lane_predicate == 1) { + for (int i = 0; i < Stages; ++i) { + full_barrier_ptr_[i].init(params.producer_arv_count); + empty_barrier_ptr_[i].init(params.consumer_arv_count); + } + } + cutlass::arch::fence_barrier_init(); + } + + //////////////////// + // Producer APIs + //////////////////// + // Four member functions are always used in pairs: + // + // * producer_try_acquire and producer_acquire, and + // * consumer_try_wait and consumer_wait. + // + // The two functions with "try" in their names are called "try" functions, + // and the other two are conceptually "finalize" functions. + // The "try" function in each pair starts the process of waiting on the barrier to flip. + // It opportunistically waits for an implementation-dependent timeout. + // Whether or not the barrier has flipped yet, the try function will return a token. 
+ // If the token indicates that the barrier has not flipped, + // then the token must be passed into the corresponding "finalize" function. + // The finalize function will then block until the barrier has flipped. + // If the token indicates that the barrier _has_ flipped, + // then it is still correct to pass it into the finalize function. + // The finalize function will return immediately in that case. + CUTLASS_DEVICE + ProducerToken producer_try_acquire(PipelineState state, uint32_t skip_wait = false) { + return producer_try_acquire(state.index(), state.phase(), skip_wait); + } + + CUTLASS_DEVICE + void producer_acquire(PipelineState state, ProducerToken barrier_token = {BarrierStatus::WaitAgain}) { + producer_acquire(state.index(), state.phase(), barrier_token); + } + + // Perform an expect-tx operation on the stage's full barrier. Must be called by 1 thread + CUTLASS_DEVICE + void producer_expect_transaction(PipelineState state) { + producer_expect_transaction(state.index()); + } + + CUTLASS_DEVICE + void producer_commit(PipelineState state) { + producer_commit(state.index()); + } + + // Prevents early exit of producer blocks in Cluster. + // This should be called once before kernel exits. + CUTLASS_DEVICE + void producer_tail(PipelineState state) { + for (int count = 0; count < Stages; ++count) { + producer_acquire(state); + ++state; + } + } + + CUTLASS_DEVICE + ProducerBarrierType* producer_get_barrier(PipelineState state) { + return producer_get_barrier(state.index()); + } + + //////////////////// + // Consumer APIs + //////////////////// + CUTLASS_DEVICE + ConsumerToken consumer_try_wait(PipelineState state, uint32_t skip_wait = false) { + return consumer_try_wait(state.index(), state.phase(), skip_wait); + } + + CUTLASS_DEVICE + ConsumerToken consumer_test_wait(PipelineState state, uint32_t skip_wait = false) { + return consumer_test_wait(state.index(), state.phase(), skip_wait); + } + + CUTLASS_DEVICE + void consumer_wait(PipelineState state, ConsumerToken barrier_token = {BarrierStatus::WaitAgain}) { + consumer_wait(state.index(), state.phase(), barrier_token); + } + + CUTLASS_DEVICE + void consumer_release(PipelineState state) { + consumer_release(state.index()); + } + +private: + FullBarrier *full_barrier_ptr_ = nullptr; + EmptyBarrier *empty_barrier_ptr_ = nullptr; + Params params_; + + CUTLASS_DEVICE + ProducerToken producer_try_acquire(uint32_t stage, uint32_t phase, uint32_t skip_wait) { + if (skip_wait) { + return {BarrierStatus::WaitDone}; + } + uint32_t barrier_status = empty_barrier_ptr_[stage].try_wait(phase); + return {static_cast(barrier_status)}; + } + + CUTLASS_DEVICE + void producer_acquire(uint32_t stage, uint32_t phase, ProducerToken barrier_token) { + if (barrier_token == BarrierStatus::WaitAgain) { + empty_barrier_ptr_[stage].wait(phase); + } + } + + // Perform an expect-tx operation on the stage's full barrier. 
Must be called by 1 thread + CUTLASS_DEVICE + void producer_expect_transaction(uint32_t stage) { + full_barrier_ptr_[stage].expect_transaction(params_.transaction_bytes); + } + + CUTLASS_DEVICE + void producer_commit(uint32_t stage) { + full_barrier_ptr_[stage].arrive(params_.dst_blockid); + } + + CUTLASS_DEVICE + ProducerBarrierType* producer_get_barrier(uint32_t stage) { + return reinterpret_cast(&full_barrier_ptr_[stage]); + } + + CUTLASS_DEVICE + ConsumerToken consumer_try_wait(uint32_t stage, uint32_t phase, uint32_t skip_wait) { + if (skip_wait) { + return {BarrierStatus::WaitDone}; + } + uint32_t barrier_status = full_barrier_ptr_[stage].try_wait(phase); + return {static_cast(barrier_status)}; + } + + CUTLASS_DEVICE + ConsumerToken consumer_test_wait(uint32_t stage, uint32_t phase, uint32_t skip_wait) { + if (skip_wait) { + return {BarrierStatus::WaitDone}; + } + uint32_t barrier_status = full_barrier_ptr_[stage].test_wait(phase); + return {static_cast(barrier_status)}; + } + + CUTLASS_DEVICE + void consumer_wait(uint32_t stage, uint32_t phase, ConsumerToken barrier_token) { + if (barrier_token == BarrierStatus::WaitAgain) { + full_barrier_ptr_[stage].wait(phase); + } + } + + CUTLASS_DEVICE + void consumer_release(uint32_t stage, uint32_t skip = false) { + empty_barrier_ptr_[stage].arrive(params_.dst_blockid, (not skip)); + } +}; + +/////////////////////////////////////////////////////////////////////////////////////////////////// +// +// Simple producer-consumer async Pipeline class +// +/////////////////////////////////////////////////////////////////////////////////////////////////// +template +class PipelineAsync { +public : + using FullBarrier = cutlass::arch::ClusterBarrier; + using EmptyBarrier = cutlass::arch::ClusterBarrier; + using ProducerBarrierType = FullBarrier::ValueType; + using ConsumerBarrierType = EmptyBarrier::ValueType; + static constexpr uint32_t Stages = Stages_; + using PipelineState = cutlass::PipelineState; + + struct SharedStorage { + FullBarrier full_barrier_[Stages]; + EmptyBarrier empty_barrier_[Stages]; + }; + + enum class ThreadCategory { + NonParticipant, + Producer, + Consumer, + ProducerConsumer + }; + + struct Params { + ThreadCategory role = ThreadCategory::NonParticipant; + uint32_t producer_arv_count = 1; + uint32_t consumer_arv_count = 1; + uint32_t dst_blockid = cute::block_rank_in_cluster(); + }; + + // Default assumption when only storage is passed is : + // => single producer, single consumer & they are in the same block (within the Cluster) + CUTLASS_DEVICE + PipelineAsync(SharedStorage& storage) + : PipelineAsync(storage, {}) {} + + CUTLASS_DEVICE + PipelineAsync( + SharedStorage& storage, + Params const& params) : + params_(params), + full_barrier_ptr_(&storage.full_barrier_[0]), + empty_barrier_ptr_(&storage.empty_barrier_[0]) { + + int warp_idx = canonical_warp_idx(); + int lane_predicate = cute::elect_one_sync(); + + // Barrier FULL, EMPTY init + // Init is done only by thread 0 of the block + if (warp_idx == 0 && lane_predicate == 1) { + for (int i = 0; i < Stages; ++i) { + full_barrier_ptr_[i].init(params.producer_arv_count); + empty_barrier_ptr_[i].init(params.consumer_arv_count); + } + } + cutlass::arch::fence_barrier_init(); + } + + //////////////////// + // Producer APIs + //////////////////// + // Four member functions are always used in pairs: + // + // * producer_try_acquire and producer_acquire, and + // * consumer_try_wait and consumer_wait. 
+ // + // The two functions with "try" in their names are called "try" functions, + // and the other two are conceptually "finalize" functions. + // The "try" function in each pair starts the process of waiting on the barrier to flip. + // It opportunistically waits for an implementation-dependent timeout. + // Whether or not the barrier has flipped yet, the try function will return a token. + // If the token indicates that the barrier has not flipped, + // then the token must be passed into the corresponding "finalize" function. + // The finalize function will then block until the barrier has flipped. + // If the token indicates that the barrier _has_ flipped, + // then it is still correct to pass it into the finalize function. + // The finalize function will return immediately in that case. + CUTLASS_DEVICE + ProducerToken producer_try_acquire(PipelineState state, uint32_t skip_wait = false) { + return producer_try_acquire(state.index(), state.phase(), skip_wait); + } + + CUTLASS_DEVICE + void producer_acquire(PipelineState state, ProducerToken barrier_token = {BarrierStatus::WaitAgain}) { + producer_acquire(state.index(), state.phase(), barrier_token); + } + + CUTLASS_DEVICE + void producer_commit(PipelineState state) { + producer_commit(state.index()); + } + + // Prevents early exit of producer blocks in Cluster. + // This should be called once before kernel exits. + CUTLASS_DEVICE + void producer_tail(PipelineState state) { + for (int count = 0; count < Stages; ++count) { + producer_acquire(state); + ++state; + } + } + + CUTLASS_DEVICE + ProducerBarrierType* producer_get_barrier(PipelineState state) { + return producer_get_barrier(state.index()); + } + + //////////////////// + // Consumer APIs + //////////////////// + CUTLASS_DEVICE + ConsumerToken consumer_try_wait(PipelineState state, uint32_t skip_wait = false) { + return consumer_try_wait(state.index(), state.phase(), skip_wait); + } + + CUTLASS_DEVICE + ConsumerToken consumer_test_wait(PipelineState state, uint32_t skip_wait = false) { + return consumer_test_wait(state.index(), state.phase(), skip_wait); + } + + CUTLASS_DEVICE + void consumer_wait(PipelineState state, ConsumerToken barrier_token = {BarrierStatus::WaitAgain}) { + consumer_wait(state.index(), state.phase(), barrier_token); + } + + CUTLASS_DEVICE + void consumer_release(PipelineState state) { + consumer_release(state.index()); + } + +private: + Params params_; + FullBarrier *full_barrier_ptr_; + EmptyBarrier *empty_barrier_ptr_; + + CUTLASS_DEVICE + ProducerToken producer_try_acquire(uint32_t stage, uint32_t phase, uint32_t skip_wait) { + if (skip_wait) { + return {BarrierStatus::WaitDone}; + } + uint32_t barrier_status = empty_barrier_ptr_[stage].try_wait(phase); + return {static_cast(barrier_status)}; + } + + CUTLASS_DEVICE + void producer_acquire(uint32_t stage, uint32_t phase, ProducerToken barrier_token) { + if (barrier_token == BarrierStatus::WaitAgain) { + empty_barrier_ptr_[stage].wait(phase); + } + } + + CUTLASS_DEVICE + void producer_commit(uint32_t stage) { + full_barrier_ptr_[stage].arrive(); + } + + CUTLASS_DEVICE + ProducerBarrierType* producer_get_barrier(uint32_t stage) { + return reinterpret_cast(&full_barrier_ptr_[stage]); + } + + CUTLASS_DEVICE + ConsumerToken consumer_try_wait(uint32_t stage, uint32_t phase, uint32_t skip_wait) { + if (skip_wait) { + return {BarrierStatus::WaitDone}; + } + uint32_t barrier_status = full_barrier_ptr_[stage].try_wait(phase); + return {static_cast(barrier_status)}; + } + + CUTLASS_DEVICE + ConsumerToken 
consumer_test_wait(uint32_t stage, uint32_t phase, uint32_t skip_wait) { + if (skip_wait) { + return {BarrierStatus::WaitDone}; + } + uint32_t barrier_status = full_barrier_ptr_[stage].test_wait(phase); + return {static_cast(barrier_status)}; + } + + CUTLASS_DEVICE + void consumer_wait(uint32_t stage, uint32_t phase) { + uint32_t done = full_barrier_ptr_[stage].test_wait(phase); + if (!done) { + full_barrier_ptr_[stage].wait(phase); + } + } + + CUTLASS_DEVICE + void consumer_wait(uint32_t stage, uint32_t phase, ConsumerToken barrier_token) { + if (barrier_token == BarrierStatus::WaitAgain) { + full_barrier_ptr_[stage].wait(phase); + } + } + + CUTLASS_DEVICE + void consumer_release(uint32_t stage) { + empty_barrier_ptr_[stage].arrive(params_.dst_blockid); + } +}; + + +/////////////////////////////////////////////////////////////////////////////////////////////////// +// +// Barrier to ensure an Ordered Sequence between +// SequenceLength number of groups (each with group_size participants) executing SequenceDepth Stages +// i.e., for all i < j - only after id "i" arrives at a particular stage "m" +// will the wait() for id "j" succeed for the same stage +// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +template +class OrderedSequenceBarrier { +public : + using Barrier = cutlass::arch::ClusterBarrier; + + struct SharedStorage { + Barrier barrier_[SequenceDepth][SequenceLength]; + }; + + struct Params { + uint32_t group_id; + uint32_t group_size; + }; + +private : + // In future this Params object can be replaced easily with a CG object + Params params_; + Barrier *barrier_ptr_; + PipelineState stage_; + + static constexpr int Depth = SequenceDepth; + static constexpr int Length = SequenceLength; + +public: + OrderedSequenceBarrier() = delete; + OrderedSequenceBarrier(const OrderedSequenceBarrier&) = delete; + OrderedSequenceBarrier(OrderedSequenceBarrier&&) = delete; + OrderedSequenceBarrier& operator=(const OrderedSequenceBarrier&) = delete; + OrderedSequenceBarrier& operator=(OrderedSequenceBarrier&&) = delete; + ~OrderedSequenceBarrier() = default; + + CUTLASS_DEVICE + OrderedSequenceBarrier(SharedStorage& storage, Params const& params) : + params_(params), + barrier_ptr_(&storage.barrier_[0][0]), + // Group 0 - starts with an opposite phase + stage_({0, params.group_id == 0, 0}) { + int warp_idx = canonical_warp_idx(); + int lane_predicate = cute::elect_one_sync(); + + // Barrier FULL, EMPTY init + // Init is done only by the one elected thread of the block + if (warp_idx == 0 && lane_predicate == 1) { + for (int d = 0; d < Depth; ++d) { + for (int l = 0; l < Length; ++l) { + barrier_ptr_[d * Length + l].init(params.group_size); + } + } + } + cutlass::arch::fence_barrier_init(); + } + + // Wait on a stage to be unlocked + CUTLASS_DEVICE + void wait() { + get_barrier_for_current_stage(params_.group_id).wait(stage_.phase()); + } + + // Signal completion of Stage and move to the next stage + // (group_id) signals to (group_id+1) + CUTLASS_DEVICE + void arrive() { + int signalling_id = (params_.group_id + 1) % Length; + get_barrier_for_current_stage(signalling_id).arrive(); + ++stage_; + } + + CUTLASS_DEVICE + void advance() { + ++stage_; + } + +private: + + CUTLASS_DEVICE + Barrier& get_barrier_for_current_stage(int group_id) { + return barrier_ptr_[stage_.index() * Length + group_id]; + } +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +} // end namespace cutlass diff 
--git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/platform/platform.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/platform/platform.h new file mode 100644 index 0000000000000000000000000000000000000000..4e8ee96f824d45b49104858fe8004bb94732dc73 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/platform/platform.h @@ -0,0 +1,972 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +#pragma once + +/** + * \file + * \brief C++ features that may be otherwise unimplemented for CUDA device functions. + * + * This file has three components: + * + * (1) Macros: + * - Empty macro defines for C++ keywords not supported by the current + * version of C++. These simply allow compilation to proceed (but do + * not provide the added semantics). + * - \p noexcept + * - \p constexpr + * - \p nullptr + * - \p static_assert + * + * - Macro functions that we need in constant expressions because the + * C++ equivalents require constexpr compiler support. These are + * prefixed with \p __NV_STD_* + * - \p __NV_STD_MAX + * - \p __NV_STD_MIN + * + * (2) Re-implementations of STL functions and types: + * - C++ features that need the \p __device__ annotation. These are + * placed into the \p platform namespace. + * - \p abs + * - \p plus + * - \p less + * - \p greater + * - \p min + * - \p max + * - \p methods on std::pair (==, !=, <, <=, >, >=, and make_pair()) + * + * (3) Stop-gap implementations of unsupported STL functions and types: + * - STL functions and types defined by C++ 11/14/17/etc. that are not + * provided by the current version of C++. 
These are placed into the + * \p platform namespace + * - \p integral_constant + * - \p nullptr_t + * - \p true_type + * - \p false_type + * - \p bool_constant + * - \p enable_if + * - \p conditional + * - \p is_same + * - \p is_base_of + * - \p remove_const + * - \p remove_volatile + * - \p remove_cv + * - \p is_volatile + * - \p is_pointer + * - \p is_void + * - \p is_integral + * - \p is_floating_point + * - \p is_arithmetic + * - \p is_fundamental + * - \p is_trivially_copyable + * - \p alignment_of + * - \p aligned_storage + * + * The idea is that, as we drop support for older compilers, we can simply #define + * the \p __NV_STD_XYZ macros and \p platform namespace to alias their C++ + * counterparts (or trivially find-and-replace their occurrences in code text). + */ + +/* + Note: CUTLASS 3x increases the host compiler requirements to C++17. However, certain + existing integrations of CUTLASS require C++11 host compilers. + + Until this requirement can be lifted, certain headers with this annotation are required + to be remain consistent with C++11 syntax. + + C++11 compatibility is enforced by `cutlass_test_unit_core_cpp11`. +*/ + +//----------------------------------------------------------------------------- +// Dependencies +//----------------------------------------------------------------------------- + +#if defined(__CUDACC_RTC__) +#include +#include +#include +#include +#include +#else +#include +#endif + +#if !defined(__CUDACC_RTC__) +//----------------------------------------------------------------------------- +// Include STL files that platform provides functionality for +//----------------------------------------------------------------------------- + +#include // Minimum/maximum operations +#include // nullptr_t +#include // Arithmetic operations +#include // For methods on std::pair +#if (!defined(_MSC_VER) && (__cplusplus >= 201103L)) || (defined(_MSC_VER) && (_MS_VER >= 1500)) +#include // For integral constants, conditional metaprogramming, and type traits +#endif + +#include "cutlass/cutlass.h" + +#endif + +//----------------------------------------------------------------------------- +// OS +//----------------------------------------------------------------------------- +#if defined(WIN32) || defined(_WIN32) || defined(__WIN32) && !defined(__CYGWIN__) +#define CUTLASS_OS_WINDOWS +#endif + +/****************************************************************************** + * Macros + ******************************************************************************/ +/// std +#if !defined(CUTLASS_STL_NAMESPACE) +#if defined(__CUDACC_RTC__) +#define CUTLASS_STL_NAMESPACE cuda::std +#else +#define CUTLASS_STL_NAMESPACE std +#endif +#endif + +/// builtin_unreachable +#if !defined(CUTLASS_GCC_UNREACHABLE) +# if defined(__clang__) || defined(__GNUC__) +# define CUTLASS_GCC_UNREACHABLE __builtin_unreachable() +# else +# define CUTLASS_GCC_UNREACHABLE +# endif +#endif + +//----------------------------------------------------------------------------- +// Keywords +//----------------------------------------------------------------------------- + +/// noexcept, constexpr +#if (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1900)) +#ifndef noexcept +#define noexcept +#endif +#ifndef constexpr +#define constexpr +#endif +#endif + +/// nullptr +#if (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1310)) +#ifndef nullptr +#define nullptr 0 +#endif +#endif + +/// static_assert +#if (!defined(_MSC_VER) && 
(__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1600)) +#ifndef static_assert +#define __platform_cat_(a, b) a##b +#define __platform_cat(a, b) __platform_cat_(a, b) +#define static_assert(__e, __m) typedef int __platform_cat(AsSeRt, __LINE__)[(__e) ? 1 : -1] +#endif +#endif + +//----------------------------------------------------------------------------- +// Functions +//----------------------------------------------------------------------------- + +/// Select maximum(a, b) +#ifndef __NV_STD_MAX +#define __NV_STD_MAX(a, b) (((b) > (a)) ? (b) : (a)) +#endif + +/// Select minimum(a, b) +#ifndef __NV_STD_MIN +#define __NV_STD_MIN(a, b) (((b) < (a)) ? (b) : (a)) +#endif + +/****************************************************************************** + * Re-implementations + ******************************************************************************/ +namespace cutlass { +namespace platform { + +//----------------------------------------------------------------------------- +// Abs operations +//----------------------------------------------------------------------------- + +#if defined(__CUDACC_RTC__) +/// std::abs +CUTLASS_HOST_DEVICE constexpr int abs(int a) { + return (a < 0) ? -a : a; +} +CUTLASS_HOST_DEVICE constexpr long long abs(long long a) { + return (a < 0) ? -a : a; +} +#else +using std::abs; +#endif + +//----------------------------------------------------------------------------- +// Minimum/maximum operations +//----------------------------------------------------------------------------- + +/// std::min +template +CUTLASS_HOST_DEVICE constexpr const T& min(const T& a, const T& b) { + return (b < a) ? b : a; +} + +/// std::max +template +CUTLASS_HOST_DEVICE constexpr const T& max(const T& a, const T& b) { + return (a < b) ? b : a; +} + +#if !defined(__CUDACC_RTC__) +//----------------------------------------------------------------------------- +// Methods on std::pair +//----------------------------------------------------------------------------- + +using std::pair; + +template +CUTLASS_HOST_DEVICE constexpr bool operator==(const pair& lhs, const pair& rhs) { + return (lhs.first == rhs.first) && (lhs.second == rhs.second); +} + +template +CUTLASS_HOST_DEVICE constexpr bool operator!=(const pair& lhs, const pair& rhs) { + return (lhs.first != rhs.first) && (lhs.second != rhs.second); +} + +template +CUTLASS_HOST_DEVICE constexpr bool operator<(const pair& lhs, const pair& rhs) { + return (lhs.first < rhs.first) ? true : (rhs.first < lhs.first) ? false + : (lhs.second < rhs.second); +} + +template +CUTLASS_HOST_DEVICE constexpr bool operator<=(const pair& lhs, const pair& rhs) { + return !(rhs < lhs); +} + +template +CUTLASS_HOST_DEVICE constexpr bool operator>(const pair& lhs, const pair& rhs) { + return (rhs < lhs); +} + +template +CUTLASS_HOST_DEVICE constexpr bool operator>=(const pair& lhs, const pair& rhs) { + return !(lhs < rhs); +} + +template +CUTLASS_HOST_DEVICE std::pair make_pair(T1 t, T2 u) { + std::pair retval; + retval.first = t; + retval.second = u; + return retval; +} +#endif + +} // namespace platform + +/****************************************************************************** + * Implementations of C++ 11/14/17/... 
STL features + ******************************************************************************/ + +namespace platform { + +//----------------------------------------------------------------------------- +// Integral constant helper types +//----------------------------------------------------------------------------- + +#if defined(__CUDACC_RTC__) || (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1500)) + +/// std::integral_constant +template +struct integral_constant; + +/// std::integral_constant +template +struct integral_constant { + static const value_t value = V; + + typedef value_t value_type; + typedef integral_constant type; + + CUTLASS_HOST_DEVICE operator value_type() const { return value; } + + CUTLASS_HOST_DEVICE const value_type operator()() const { return value; } +}; + +#else + +using std::integral_constant; +using std::pair; + +#endif + +/// The type used as a compile-time boolean with true value. +typedef integral_constant true_type; + +/// The type used as a compile-time boolean with false value. +typedef integral_constant false_type; + +#if defined(__CUDACC_RTC__) || (!defined(_MSC_VER) && (__cplusplus <= 201402L)) || (defined(_MSC_VER) && (_MSC_VER < 1900)) + +/// std::bool_constant +template +struct bool_constant : platform::integral_constant {}; + +#else + +using std::bool_constant; + +#endif + +#if defined(__CUDACC_RTC__) || (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1700)) + +/// std::nullptr_t +struct nullptr_t {}; + +#else + +using std::nullptr_t; + +#endif + +//----------------------------------------------------------------------------- +// Conditional metaprogramming +//----------------------------------------------------------------------------- + +#if defined(__CUDACC_RTC__) || (!defined(_MSC_VER) && (__cplusplus < 201700L)) || (defined(_MSC_VER) && (_MSC_VER < 1600)) + +/// std::enable_if (true specialization) +template +struct enable_if { + typedef T type; +}; + +/// std::enable_if (false specialization) +template +struct enable_if {}; + +/// std::conditional (true specialization) +template +struct conditional { + typedef T type; +}; + +/// std::conditional (false specialization) +template +struct conditional { + typedef F type; +}; + +#else + +using std::enable_if; +using std::conditional; + +#endif + +#if (201703L <=__cplusplus) +/// std::conditional_t +using CUTLASS_STL_NAMESPACE::conditional_t; +#endif + +//----------------------------------------------------------------------------- +// Const/volatility specifiers +//----------------------------------------------------------------------------- + +#if defined(__CUDACC_RTC__) || (!defined(_MSC_VER) && (__cplusplus < 201703L)) || (defined(_MSC_VER) && (_MSC_VER < 1500)) + +/// std::remove_const (non-const specialization) +template +struct remove_const { + typedef T type; +}; + +/// std::remove_const (const specialization) +template +struct remove_const { + typedef T type; +}; + +/// std::remove_volatile (non-volatile specialization) +template +struct remove_volatile { + typedef T type; +}; + +/// std::remove_volatile (volatile specialization) +template +struct remove_volatile { + typedef T type; +}; + +/// std::remove_cv +template +struct remove_cv { + typedef typename remove_volatile::type>::type type; +}; + +#else + +using std::remove_const; +using std::remove_volatile; +using std::remove_cv; + +#endif + +#if (201703L <=__cplusplus) + +/// std::remove_cv_t +using CUTLASS_STL_NAMESPACE::remove_cv_t; +/// 
std::remove_reference_t +using CUTLASS_STL_NAMESPACE::remove_reference_t; + +// C++20 +// using std::remove_cvref; +template +struct remove_cvref { + using type = remove_cv_t>; +}; + +// C++20 +// using std::remove_cvref_t; +template +using remove_cvref_t = typename remove_cvref::type; + +#endif + + +//----------------------------------------------------------------------------- +// Type relationships +//----------------------------------------------------------------------------- + +#if defined(__CUDACC_RTC__) || (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1500)) + +/// std::is_same (false specialization) +template +struct is_same : false_type {}; + +/// std::is_same (true specialization) +template +struct is_same : true_type {}; + +/// Helper for std::is_base_of +template +struct is_base_of_helper { + typedef char (&yes)[1]; + typedef char (&no)[2]; + + template + struct dummy { + CUTLASS_HOST_DEVICE operator B*() const; + CUTLASS_HOST_DEVICE operator D*(); + }; + + template + CUTLASS_HOST_DEVICE static yes check(DerivedT*, T); + + CUTLASS_HOST_DEVICE static no check(BaseT*, int); + + static const bool value = sizeof(check(dummy(), int())) == sizeof(yes); +}; + +/// std::is_base_of +template +struct is_base_of + : integral_constant::type, + typename remove_cv::type>::value) || + (is_same::type, + typename remove_cv::type>::value)> {}; + +#else + +using std::is_same; +using std::is_base_of; + +#endif + +//----------------------------------------------------------------------------- +// Type properties +//----------------------------------------------------------------------------- + +#if defined(__CUDACC_RTC__) || (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1500)) + +/// std::is_volatile +template +struct is_volatile : false_type {}; +template +struct is_volatile : true_type {}; + +/// Helper for std::is_pointer (false specialization) +template +struct is_pointer_helper : false_type {}; + +/// Helper for std::is_pointer (true specialization) +template +struct is_pointer_helper : true_type {}; + +/// std::is_pointer +template +struct is_pointer : is_pointer_helper::type> {}; + +/// std::is_void +template +struct is_void : is_same::type> {}; + +/// std::is_integral +template +struct is_integral : false_type {}; +template <> +struct is_integral : true_type {}; +template <> +struct is_integral : true_type {}; +template <> +struct is_integral : true_type {}; +template <> +struct is_integral : true_type {}; +template <> +struct is_integral : true_type {}; +template <> +struct is_integral : true_type {}; +template <> +struct is_integral : true_type {}; +template <> +struct is_integral : true_type {}; +template <> +struct is_integral : true_type {}; +template <> +struct is_integral : true_type {}; +template <> +struct is_integral : true_type {}; +template +struct is_integral : is_integral {}; +template +struct is_integral : is_integral {}; +template +struct is_integral : is_integral {}; + +/// std::is_floating_point +template +struct is_floating_point + : integral_constant::type>::value || + is_same::type>::value)> {}; + +/// std::is_arithmetic +template +struct is_arithmetic + : integral_constant::value || is_floating_point::value)> {}; + +/// std::is_fundamental +template +struct is_fundamental + : integral_constant::value || is_void::value || + is_same::type>::value)> {}; + +#else + +using std::is_volatile; +using std::is_pointer; +using std::is_void; +using std::is_integral; +using std::is_floating_point; +using 
std::is_arithmetic; +using std::is_fundamental; + +#endif + +#if defined(__CUDACC_RTC__) || (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1800)) || \ + (defined(__GNUG__) && (__GNUC__ < 5)) + +/** + * std::is_trivially_copyable + * + * This implementation only evaluates true if T is fundamental or pointer + * + * Without help from partial template specializations provided by the user for + * a specific class or struct, this trait will never report that the specified + * class or struct is trivially-copyable ; this is always safe, + * if possibly sub-optimal. + */ +template +struct is_trivially_copyable + : integral_constant::value || is_pointer::value)> {}; + +#else + +using std::is_trivially_copyable; + +#endif + +#if (201703L <=__cplusplus) + +/// std::is_unsigned_v +using CUTLASS_STL_NAMESPACE::is_integral_v; +/// std::is_unsigned_v +using CUTLASS_STL_NAMESPACE::is_unsigned_v; + +#endif + +//----------------------------------------------------------------------------- +// bit_cast +//----------------------------------------------------------------------------- + +template< class To, class From > +constexpr To CUTLASS_HOST_DEVICE bit_cast(const From& from ) noexcept; + +template +constexpr To CUTLASS_HOST_DEVICE bit_cast(const From& src) noexcept +{ + static_assert(sizeof(To) == sizeof(From), "sizes must match"); + return reinterpret_cast(src); +} + +//----------------------------------------------------------------------------- +// Alignment and layout utilities +//----------------------------------------------------------------------------- + +#if defined(__CUDACC_RTC__) || (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1500)) + +/// std::alignment_of +template +struct alignment_of { + struct pad { + value_t val; + char byte; + }; + + enum { value = sizeof(pad) - sizeof(value_t) }; +}; + +#else + +template +struct alignment_of : std::alignment_of {}; + +#endif + +/* 16B specializations where 32-bit Win32 host compiler disagrees with device compiler */ +template <> +struct alignment_of { + enum { value = 16 }; +}; +template <> +struct alignment_of { + enum { value = 16 }; +}; +template <> +struct alignment_of { + enum { value = 16 }; +}; +template <> +struct alignment_of { + enum { value = 16 }; +}; +template <> +struct alignment_of { + enum { value = 16 }; +}; +template <> +struct alignment_of { + enum { value = 16 }; +}; +template <> +struct alignment_of { + enum { value = 16 }; +}; +template <> +struct alignment_of { + enum { value = 16 }; +}; +template <> +struct alignment_of { + enum { value = 16 }; +}; +template <> +struct alignment_of { + enum { value = 16 }; +}; +template <> +struct alignment_of { + enum { value = 16 }; +}; + +// Specializations for volatile/const qualified types +template +struct alignment_of : alignment_of {}; +template +struct alignment_of : alignment_of {}; +template +struct alignment_of : alignment_of {}; + +#if defined(__CUDACC_RTC__) || (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1800)) + +template +struct aligned_chunk; +template <> +struct __align__(1) aligned_chunk<1> { + uint8_t buff; +}; +template <> +struct __align__(2) aligned_chunk<2> { + uint16_t buff; +}; +template <> +struct __align__(4) aligned_chunk<4> { + uint32_t buff; +}; +template <> +struct __align__(8) aligned_chunk<8> { + uint32_t buff[2]; +}; +template <> +struct __align__(16) aligned_chunk<16> { + uint32_t buff[4]; +}; +template <> +struct __align__(32) 
aligned_chunk<32> { + uint32_t buff[8]; +}; +template <> +struct __align__(64) aligned_chunk<64> { + uint32_t buff[16]; +}; +template <> +struct __align__(128) aligned_chunk<128> { + uint32_t buff[32]; +}; +template <> +struct __align__(256) aligned_chunk<256> { + uint32_t buff[64]; +}; +template <> +struct __align__(512) aligned_chunk<512> { + uint32_t buff[128]; +}; +template <> +struct __align__(1024) aligned_chunk<1024> { + uint32_t buff[256]; +}; +template <> +struct __align__(2048) aligned_chunk<2048> { + uint32_t buff[512]; +}; +template <> +struct __align__(4096) aligned_chunk<4096> { + uint32_t buff[1024]; +}; + +/// std::aligned_storage +template +struct aligned_storage { + typedef aligned_chunk type[Len / sizeof(aligned_chunk)]; +}; + +#else + +using std::aligned_storage; + +#endif + +#if !defined(__CUDACC_RTC__) +/// Default deleter +template +struct default_delete { + void operator()(T* ptr) const { delete ptr; } +}; + +/// Partial specialization for deleting array types +template +struct default_delete { + void operator()(T* ptr) const { delete[] ptr; } +}; + +/// std::unique_ptr +template > +class unique_ptr { + public: + typedef T* pointer; + typedef T element_type; + typedef Deleter deleter_type; + + private: + /// Pointer to memory + pointer _ptr; + + /// Deleter + deleter_type _deleter; + + public: + unique_ptr() : _ptr(nullptr) {} + unique_ptr(pointer p) : _ptr(p) {} + + ~unique_ptr() { + if (_ptr) { + _deleter(_ptr); + } + } + /// Returns a pointer to the managed object or nullptr if no object is owned. + pointer get() const noexcept { return _ptr; } + + /// Releases ownership of the managed object, if any + pointer release() noexcept { + pointer p(_ptr); + _ptr = nullptr; + return p; + } + + /// Replaces the managed object, deleting the old object. 
+ void reset(pointer p = pointer()) noexcept { + pointer old_ptr = _ptr; + _ptr = p; + if (old_ptr != nullptr) { + get_deleter()(old_ptr); + } + } + + /// Swaps the managed objects with *this and another unique_ptr + void swap(unique_ptr& other) noexcept { std::swap(_ptr, other._ptr); } + + /// Returns the deleter object + Deleter& get_deleter() noexcept { return _deleter; } + + /// Returns the deleter object + Deleter const& get_deleter() const noexcept { return _deleter; } + + /// Checks whether an object is owned + operator bool() const noexcept { return _ptr != nullptr; } + + /// Dereferences the unique_ptr + T& operator*() const { return *_ptr; } + + /// Returns a pointer to the managed object + pointer operator->() const noexcept { return _ptr; } + + /// Array access to managed object + T& operator[](size_t i) const { return _ptr[i]; } +}; + +/// Specializes the swap algorithm +template +void swap(unique_ptr& lhs, unique_ptr& rhs) noexcept { + lhs.swap(rhs); +} +#endif + +/// std::numeric_limits +template +struct numeric_limits; + +template <> +struct numeric_limits { + CUTLASS_HOST_DEVICE + static constexpr int32_t lowest() noexcept { return -2147483647 - 1;} + CUTLASS_HOST_DEVICE + static constexpr int32_t max() noexcept { return 2147483647;} + static constexpr bool is_integer = true; +}; + +template <> +struct numeric_limits { + CUTLASS_HOST_DEVICE + static constexpr int16_t lowest() noexcept { return -32768;} + CUTLASS_HOST_DEVICE + static constexpr int16_t max() noexcept { return 32767;} + static constexpr bool is_integer = true; +}; + +template <> +struct numeric_limits { + CUTLASS_HOST_DEVICE + static constexpr int8_t lowest() noexcept { return -128;} + CUTLASS_HOST_DEVICE + static constexpr int8_t max() noexcept { return 127;} + static constexpr bool is_integer = true; +}; + + +template <> +struct numeric_limits { + CUTLASS_HOST_DEVICE + static constexpr uint32_t lowest() noexcept { return 0;} + CUTLASS_HOST_DEVICE + static constexpr uint32_t max() noexcept { return 4294967295U;} + static constexpr bool is_integer = true; +}; + +template <> +struct numeric_limits { + CUTLASS_HOST_DEVICE + static constexpr uint16_t lowest() noexcept { return 0;} + CUTLASS_HOST_DEVICE + static constexpr uint16_t max() noexcept { return 65535U;} + static constexpr bool is_integer = true; +}; + +template <> +struct numeric_limits { + CUTLASS_HOST_DEVICE + static constexpr uint8_t lowest() noexcept { return 0;} + CUTLASS_HOST_DEVICE + static constexpr uint8_t max() noexcept { return 255U;} + static constexpr bool is_integer = true; +}; + +#if !defined(__CUDACC_RTC__) +template <> +struct numeric_limits { + CUTLASS_HOST_DEVICE + static constexpr float infinity() noexcept { return bit_cast(0x7f800000);} + static constexpr bool is_integer = false; + static constexpr bool has_infinity = true; +}; +#endif + +/// std::float_round_style +using CUTLASS_STL_NAMESPACE::float_round_style; +using CUTLASS_STL_NAMESPACE::round_indeterminate; +using CUTLASS_STL_NAMESPACE::round_toward_zero; +using CUTLASS_STL_NAMESPACE::round_to_nearest; +using CUTLASS_STL_NAMESPACE::round_toward_infinity; +using CUTLASS_STL_NAMESPACE::round_toward_neg_infinity; + +/// std::float_denorm_style +using CUTLASS_STL_NAMESPACE::float_denorm_style; +using CUTLASS_STL_NAMESPACE::denorm_indeterminate; +using CUTLASS_STL_NAMESPACE::denorm_absent; +using CUTLASS_STL_NAMESPACE::denorm_present; + +} // namespace platform +} // namespace cutlass diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/reduction/device/reduce_split_k.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/reduction/device/reduce_split_k.h new file mode 100644 index 0000000000000000000000000000000000000000..92e1f61d2d93918cf4b8d23bb64a0642da8fa80c --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/reduction/device/reduce_split_k.h @@ -0,0 +1,223 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Kernel performing a reduction over densely packed tensors in global memory +*/ + +#pragma once + +#include "cutlass/device_kernel.h" +#include "cutlass/reduction/kernel/reduce_split_k.h" +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace reduction { +namespace device { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename ReductionKernel_ +> +class ReduceSplitK { +public: + using ReductionKernel = ReductionKernel_; + + using Shape = typename ReductionKernel::Shape; + using ReductionOp = typename ReductionKernel::ReductionOp; + using OutputOp = typename ReductionKernel::OutputOp; + + using ElementWorkspace = typename ReductionKernel::ElementWorkspace; + using ElementAccumulator = typename ReductionKernel::ElementAccumulator; + using ElementOutput = typename ReductionKernel::ElementOutput; + + using WorkspaceTensorRef = typename ReductionKernel::WorkspaceTensorRef; + using OutputTensorRef = typename ReductionKernel::OutputTensorRef; + + using StrideIndex = typename ReductionKernel::StrideIndex; + + /// Argument structure + struct Arguments { + + // + // Data members + // + + MatrixCoord problem_size; + int partitions; + size_t partition_stride; + WorkspaceTensorRef workspace; + OutputTensorRef destination; + OutputTensorRef source; + typename OutputOp::Params output; + typename ReductionOp::Params reduction; + + // + // Methods + // + + /// Default ctor + CUTLASS_HOST_DEVICE + Arguments() : + problem_size(0, 0), + partitions(1), + partition_stride(0) { } + + CUTLASS_HOST_DEVICE + Arguments( + MatrixCoord const & problem_size + ): + problem_size(problem_size) { } + + CUTLASS_HOST_DEVICE + Arguments( + MatrixCoord problem_size_, + int partitions_, + size_t partition_stride_, + WorkspaceTensorRef workspace_, + OutputTensorRef destination_, + OutputTensorRef source_, + typename OutputOp::Params output_ = typename OutputOp::Params(), + typename ReductionOp::Params reduction_ = typename ReductionOp::Params() + ): + problem_size(problem_size_), + partitions(partitions_), + partition_stride(partition_stride_), + workspace(workspace_), + destination(destination_), + source(source_), + output(output_), + reduction(reduction_) + { + + } + + }; + +private: + /// Kernel parameters object + typename ReductionKernel::Params params_; + +public: + /// Constructs Reduction SplitK + ReduceSplitK() { } + + /// Determines whether the ReduceSplitK can execute the given problem. + static Status can_implement(Arguments const &args) { + + return Status::kSuccess; + } + + /// Gets the workspace size + static size_t get_workspace_size(Arguments const &args) { + // needs no additional workspace + return 0; + } + + /// Initializes Reduction state from arguments. + Status initialize( + Arguments const &args, + void *workspace = nullptr, + cudaStream_t stream = nullptr) { + + // initialize the params structure from the arguments + params_ = typename ReductionKernel::Params( + args.problem_size, + args.partitions, + args.partition_stride, + args.workspace, + args.destination, + args.source, + args.output, + args.reduction + ); + + return Status::kSuccess; + + } + + /// Initializes Reduction kernel state from arguments. 
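  //
  // Illustrative host-side driver (a sketch only; every name below is a placeholder,
  // and the concrete ReductionKernel comes from whatever GEMM produced the split-K
  // partial sums held in the workspace):
  //
  //   using ReduceOp = cutlass::reduction::device::ReduceSplitK<ReductionKernel>;
  //
  //   typename ReduceOp::Arguments args(
  //     problem_size,       // cutlass::MatrixCoord covering the output tile
  //     partitions,         // number of split-K partial results in the workspace
  //     partition_stride,   // elements between consecutive partial results
  //     workspace,          // WorkspaceTensorRef over the partial sums
  //     destination,        // OutputTensorRef receiving the reduced output
  //     source);            // OutputTensorRef read if the output op needs a source
  //
  //   ReduceOp reduce_op;
  //   cutlass::Status status = reduce_op.initialize(args);
  //   if (status == cutlass::Status::kSuccess) {
  //     status = reduce_op();   // same as reduce_op.run(nullptr)
  //   }
  //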
+ Status update(Arguments const &args, void *workspace = nullptr) { + + // update the params structure from the arguments + params_.workspace.reset(args.workspace.non_const_ref().data()); + params_.destination.reset(args.destination.non_const_ref().data()); + params_.source.reset(args.source.non_const_ref().data()); + params_.output = args.output; + params_.reduction = args.reduction; + + return Status::kSuccess; + } + + /// Runs the kernel using initialized state. + Status run(cudaStream_t stream = nullptr) { + + // + // Launch reduction kernel + // + dim3 block = ReductionKernel::block_shape(); + dim3 grid = ReductionKernel::grid_shape(params_.problem_size); + + Kernel<<< grid, block, 0, stream >>>(params_); + + cudaError_t result = cudaGetLastError(); + + return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal; + } + + + /// Runs the kernel using initialized state. + Status operator()(cudaStream_t stream = nullptr) { + return run(stream); + } + + /// Runs the kernel using initialized state. + Status operator()( + Arguments const &args, + void *workspace = nullptr, + cudaStream_t stream = nullptr) { + + Status status = initialize(args, workspace, stream); + + if (status == Status::kSuccess) { + status = run(stream); + } + + return status; + } + +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace reduction +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/reduction/device/tensor_reduce.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/reduction/device/tensor_reduce.h new file mode 100644 index 0000000000000000000000000000000000000000..31d50f6474ae5e827415801b2341411b58cad0d1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/reduction/device/tensor_reduce.h @@ -0,0 +1,264 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Kernel performing a reduction over one or more ranks of an affine tensor +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/array.h" +#include "cutlass/fast_math.h" +#include "cutlass/numeric_types.h" +#include "cutlass/numeric_conversion.h" +#include "cutlass/device_kernel.h" + +#include "cutlass/reduction/device/tensor_reduce_affine_strided.h" +#include "cutlass/reduction/device/tensor_reduce_affine_contiguous.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace reduction { +namespace device { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Tensor reduction operator on specific CUTLASS layouts over exactly one index +template < + typename ElementOutput_, + typename ElementSource_, + typename Layout_, + typename ReductionOp_, + int VectorLength_ = 1, + typename ElementCompute_ = ElementOutput_ +> +struct TensorReduction { + + using ElementOutput = ElementOutput_; + using ElementSource = ElementSource_; + using Layout = Layout_; + using ReductionOp = ReductionOp_; + static int const kVectorLength = VectorLength_; + using ElementCompute = ElementCompute_; + + using TensorCoord = typename Layout::TensorCoord; + + /// Reduction operator + using ReductionDeviceStridedOperator = TensorReductionAffineStrided< + 4, 3, ElementOutput, ElementSource, ReductionOp, kVectorLength, ElementCompute + >; + + using ReductionDeviceContiguousOperator = TensorReductionAffineContiguous< + 4, 3, ElementOutput, ElementSource, ReductionOp, kVectorLength, ElementCompute + >; + + // + // Data members + // + + ReductionDeviceStridedOperator reduction_strided; + ReductionDeviceContiguousOperator reduction_contiguous; + int reduction_index; + + // + // Methods + // + + /// + TensorReduction( + TensorCoord extent, + int reduction_index_ + ): + reduction_index(reduction_index_) { + + Coord<4> extent_affine; + + switch (reduction_index) { + case 0: + extent_affine[0] = extent[1]; + extent_affine[1] = extent[2]; + extent_affine[2] = extent[0]; + extent_affine[3] = extent[3]; + break; + case 1: + extent_affine[0] = extent[0]; + extent_affine[1] = extent[2]; + extent_affine[2] = extent[1]; + extent_affine[3] = extent[3]; + break; + case 2: + extent_affine[0] = extent[0]; + extent_affine[1] = extent[1]; + extent_affine[2] = extent[2]; + extent_affine[3] = extent[3]; + break; + case 3: + extent_affine[0] = extent[0]; + extent_affine[1] = extent[1]; + extent_affine[2] = extent[2]; + extent_affine[3] = extent[3]; + break; + default: break; + } + + if (reduction_index == 3) { + reduction_contiguous = ReductionDeviceContiguousOperator(extent_affine); + } + else { + reduction_strided = ReductionDeviceStridedOperator(extent_affine); + } + } + + /// Simple check to verify the object is initialized correctly + bool 
good() const { + if (reduction_index == 3) { + return reduction_contiguous.good(); + } + return reduction_strided.good(); + } + + /// Size of one workspace + int64_t workspace_stride() const { + if (reduction_index == 3) { + return reduction_contiguous.workspace_stride(); + } + else { + return reduction_strided.workspace_stride(); + } + } + + /// Returns the size (in bytes) of a temporary workspace needed for reduction across CTAs + int64_t workspace_size() const { + if (reduction_index == 3) { + return reduction_contiguous.workspace_size(); + } + else { + return reduction_strided.workspace_size(); + } + } + + /// Helper to use overloaded function call operator + Status reduce( + TensorRef dst_ref, + TensorRef src_ref, + void *device_workspace_ptr = nullptr, + ElementCompute reduction_identity = ElementCompute(), + ReductionOp reduction_op = ReductionOp(), + cudaStream_t stream = nullptr) { + + int64_t src_stride[3]; + int64_t dst_stride[3]; + + switch (reduction_index) { + case 0: + src_stride[0] = src_ref.stride()[1]; + src_stride[1] = src_ref.stride()[0]; + src_stride[2] = src_ref.stride()[2]; + dst_stride[0] = dst_ref.stride()[1]; + dst_stride[1] = dst_ref.stride()[0]; + break; + case 1: + src_stride[0] = src_ref.stride()[2]; + src_stride[1] = src_ref.stride()[0]; + src_stride[2] = src_ref.stride()[1]; + dst_stride[0] = dst_ref.stride()[2]; + dst_stride[1] = dst_ref.stride()[0]; + break; + case 2: + src_stride[0] = src_ref.stride()[2]; + src_stride[1] = src_ref.stride()[1]; + src_stride[2] = src_ref.stride()[0]; + dst_stride[0] = dst_ref.stride()[2]; + dst_stride[1] = dst_ref.stride()[1]; + break; + case 3: + src_stride[0] = src_ref.stride()[2]; + src_stride[1] = src_ref.stride()[1]; + src_stride[2] = src_ref.stride()[0]; + + dst_stride[0] = dst_ref.stride()[2]; + dst_stride[1] = dst_ref.stride()[1]; + dst_stride[2] = dst_ref.stride()[0]; + + default: break; + } + + if (reduction_index == 3) { + return reduction_contiguous( + dst_ref.data(), + dst_stride, + src_ref.data(), + src_stride, + device_workspace_ptr, + reduction_identity, + reduction_op, + stream); + } + else { + return reduction_strided( + dst_ref.data(), + dst_stride, + src_ref.data(), + src_stride, + device_workspace_ptr, + reduction_identity, + reduction_op, + stream); + } + } + + Status operator()( + TensorRef dst_ref, + TensorRef src_ref, + void *device_workspace_ptr = nullptr, + ElementCompute reduction_identity = ElementCompute(), + ReductionOp reduction_op = ReductionOp(), + cudaStream_t stream = nullptr) { + + return reduce( + dst_ref, + src_ref, + device_workspace_ptr, + reduction_identity, + reduction_op, + stream); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace device +} // namespace reduction +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/reduction/device/tensor_reduce_affine_contiguous.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/reduction/device/tensor_reduce_affine_contiguous.h new file mode 100644 index 0000000000000000000000000000000000000000..234a1c4c443c4ceef3f93d973cd12977e2fc8191 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/reduction/device/tensor_reduce_affine_contiguous.h @@ -0,0 +1,373 @@ 
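A brief usage sketch for the TensorReduction dispatcher defined above may be useful here; it is not taken from the CUTLASS sources, it assumes float NHWC tensors already resident on the device, and the helper name reduce_channels is purely illustrative. Reducing index 3 (the contiguous C rank) routes to the affine-contiguous kernel declared in the file that follows; any other index routes to the affine-strided kernel.

#include <cuda_runtime.h>

#include "cutlass/cutlass.h"
#include "cutlass/functional.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/reduction/device/tensor_reduce.h"

// Sums over the contiguous 'C' rank of an NHWC float tensor. 'dst' and 'src' are
// device-side TensorRefs supplied by the caller; the workspace is allocated here.
cutlass::Status reduce_channels(
    cutlass::TensorRef<float, cutlass::layout::TensorNHWC> dst,
    cutlass::TensorRef<float, cutlass::layout::TensorNHWC> src,
    cutlass::Tensor4DCoord extent,
    cudaStream_t stream = nullptr) {

  using TensorReduction = cutlass::reduction::device::TensorReduction<
      float,                          // ElementOutput
      float,                          // ElementSource
      cutlass::layout::TensorNHWC,    // Layout
      cutlass::plus<float>,           // ReductionOp
      1>;                             // VectorLength

  // reduction_index == 3 selects the affine-contiguous specialization;
  // indices 0..2 select the affine-strided specialization instead.
  TensorReduction reduction(extent, 3);
  if (!reduction.good()) {
    return cutlass::Status::kErrorInvalidProblem;
  }

  // Allocate the inter-CTA workspace only if the launch plan requires one.
  void *workspace = nullptr;
  if (reduction.workspace_size()) {
    cudaMalloc(&workspace, size_t(reduction.workspace_size()));
  }

  cutlass::Status status = reduction.reduce(
      dst, src, workspace, /*reduction_identity=*/0.0f, cutlass::plus<float>(), stream);

  // Ensure the kernels have finished using the workspace before releasing it.
  if (workspace) {
    cudaStreamSynchronize(stream);
    cudaFree(workspace);
  }
  return status;
}

The same pattern applies when reducing index 0, 1, or 2; only the reduction_index argument passed to the constructor changes.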
+/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Kernel performing a reduction over one or more ranks of an affine tensor +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/array.h" +#include "cutlass/fast_math.h" +#include "cutlass/numeric_types.h" +#include "cutlass/numeric_conversion.h" +#include "cutlass/device_kernel.h" + +#include "cutlass/reduction/kernel/tensor_reduce_affine_contiguous.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace reduction { +namespace device { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Tensor reduction operator on layouts which are affine +template < + int Rank, ///< Rank of source tensor (e.g. NDHWC => 5) + int ReducedRank, ///< Rank of reduced tensor (e.g. 
ND => 2) + typename ElementOutput_, + typename ElementSource_, + typename ReductionOp_, + int VectorLength = 1, + typename ElementCompute_ = ElementOutput_, + int Threads = 256, ///< Number of participating threads + int BatchSize = 4 ///< Number of elements to load per batch +> +struct TensorReductionAffineContiguous { + + static int const kRank = Rank; + static int const kReducedRank = ReducedRank; + static int const kVectorLength = VectorLength; + static int const kInnerRank = kRank - kReducedRank; + static int const kThreads = Threads; + static int const kBatchSize = BatchSize; + + using ElementOutput = ElementOutput_; + using ElementSource = ElementSource_; + using ReductionOp = ReductionOp_; + using ElementCompute = ElementCompute_; + + // + // Data members + // + + /// Internal status field + Status status; + + /// Extent of tensor in source layout + Coord extent; + + /// Number of points in the outer index space + int64_t outer_count; + + /// Number of elements in the inner index space + int64_t inner_count; + + /// Number of workspaces needed + int workspace_count; + + /// CUDA Grid shape (.x => contiguous, .y => outer, .z => inner) + dim3 grid_shape; + + /// CUDA Threadblock shape (.x => contiguous, .y => outer, .z => inner) + dim3 threadblock_shape; + + /// CUDA grid shape for the final reduction step if needed + dim3 grid_final; + + /// CUDA threadblock shape for the final reduction step if needed + dim3 threadblock_final; + +private: + // + // Methods + // + + /// Helper to reshape 'count' such that it is less than 2 x 'ext' + static int reshape_pow2(int ext, int count) { + if (ext > count) { + return 1; + } + int x = 1; + for (; count >= ext * 2; ) { + count >>= 1; + x <<= 1; + } + return x; + } + +public: + + /// Default ctor + TensorReductionAffineContiguous(): + status(Status::kErrorInvalidProblem), + extent(), + outer_count(0), + inner_count(0), + workspace_count(0), + grid_shape(0, 0, 0), + threadblock_shape(0, 0, 0) { } + + /// Constructor + TensorReductionAffineContiguous( + Coord extent_, + int target_threadblock_count = 128 + ): + status(Status::kSuccess), + extent(extent_), + outer_count(0), + inner_count(0), + workspace_count(0) { + + // + // Plan the parallel mapping strategy. + // + + outer_count = 1; + inner_count = 1; + + // Compute number of elements in strided ranks + for (int p = 0; p < kReducedRank; ++p) { + outer_count *= extent[p]; + } + + for (int p = 0; p < kInnerRank; ++p) { + inner_count *= extent[kReducedRank + p]; + } + + int cta_count_x = 1; + int cta_count_y = 1; + int cta_count_z = 1; + + int cta_threads_x = kThreads; + int cta_threads_y = 1; + int cta_threads_z = 1; + + // Determine CTA shape + int64_t inner_vector_count = inner_count / kVectorLength; + + // Priority 1. Assign threadblocks to outer indices if possible + if (outer_count > target_threadblock_count) { + cta_count_x = 1; + cta_count_y = target_threadblock_count; + cta_count_z = 1; + } + else { + + cta_count_y = int(outer_count); + int remaining_ctas = target_threadblock_count / cta_count_y; + + // Priority 2. 
Assign inner dimensions to one CTA + if (inner_vector_count > cta_threads_x) { + int64_t cta_z_bound = inner_vector_count / cta_threads_x; + if (cta_z_bound > remaining_ctas) { + cta_count_z = remaining_ctas; + } + else { + cta_count_z = int(cta_z_bound); + } + } + else { + cta_threads_x = reshape_pow2(int(inner_vector_count), cta_threads_x); + cta_count_z = 1; + } + } + + grid_shape = dim3(cta_count_x, cta_count_y, cta_count_z); + threadblock_shape = dim3(cta_threads_x, cta_threads_y, cta_threads_z); + + workspace_count = (cta_count_z > 1 ? cta_count_z : 0); + + // Determine shape of final reduction kernel if needed + if (workspace_count) { + + int final_threads = kThreads; + int final_ctas = 1; + + if (outer_count > kThreads) { + final_ctas = int(outer_count + kThreads - 1) / kThreads; + } + else { + final_threads = int(outer_count); + } + + grid_final = dim3(final_ctas, 1, 1); + threadblock_final = dim3(final_threads, 1, 1); + } + else { + grid_final = dim3(0, 0, 0); + threadblock_final = dim3(0, 0, 0); + } + } + + /// Simple check to verify the object is initialized correctly + bool good() const { + return status == Status::kSuccess; + } + + /// Size (in bytes) of workspace elements which are densely packed together + int64_t workspace_stride() const { + + // Error condition + if (!good()) { + return 0; + } + + return outer_count * sizeof_bits::value / 8; + } + + /// Returns the size (in bytes) of a temporary workspace needed for reduction across CTAs + int64_t workspace_size() const { + + // Error condition + if (!good()) { + return 0; + } + + // No reduction across CTAs + if (grid_shape.z == 1) { + return 0; + } + + return workspace_stride() * grid_shape.z; + } + + /// Performs a reduction + Status reduce( + ElementOutput *dst_ptr, ///< Pointer to destination tensor + int64_t dst_stride[], ///< Stride vector (of length kReducedRank - 1) + ElementSource const *src_ptr, ///< Pointer to source tensor + int64_t src_stride[], ///< Stride vector (of length kRank - 1) + void *device_workspace_ptr = nullptr, ///< Device workspace + ElementCompute reduction_identity = ElementCompute(), ///< Reduction identity element + ReductionOp reduction_op = ReductionOp(), ///< Reduction operator + cudaStream_t stream = nullptr) { ///< CUDA Stream into which all kernels are launched + + // Initial status check + if (!good()) { + return status; + } + + // Guard against null workspace + if (workspace_count > 1 && device_workspace_ptr == nullptr) { + return Status::kErrorWorkspaceNull; + } + + // Define reduction kernel + using ReductionKernel = kernel::TensorReductionAffineContiguous< + kRank, + kReducedRank, + ElementOutput, + ElementSource, + ReductionOp, + kVectorLength, + ElementCompute, + kThreads>; + + using FinalReductionKernel = kernel::TensorReductionAffineContiguousFinal< + kRank, + kReducedRank, + ElementOutput, + ElementSource, + ReductionOp, + kVectorLength, + ElementCompute, + kThreads>; + + using Params = typename ReductionKernel::Params; + + // Construct the parameters + Params params( + extent, + dst_ptr, + dst_stride, + src_ptr, + src_stride, + static_cast(device_workspace_ptr), + workspace_stride(), + workspace_count, + reduction_op, + reduction_identity); + + // Shared memory size + int shared_mem_bytes = sizeof(typename ReductionKernel::SharedStorage); + + // Launch the kernel + Kernel<<< grid_shape, threadblock_shape, shared_mem_bytes, stream >>>(params); + + // Check error condition + if (cudaPeekAtLastError() == cudaSuccess) { + status = Status::kSuccess; + } + else { + status = 
Status::kErrorInternal; + } + + // Final reduction kernel + if (workspace_count) { + Kernel<<< grid_final, threadblock_final, 0, stream >>>(params); + } + + // Check error condition + if (cudaPeekAtLastError() == cudaSuccess) { + status = Status::kSuccess; + } + else { + status = Status::kErrorInternal; + } + + return status; + } + + /// Helper to use overloaded function call operator + Status operator()( + ElementOutput *dst_ptr, ///< Pointer to destination tensor + int64_t dst_stride[], ///< Stride vector (of length kReducedRank - 1) + ElementSource const *src_ptr, ///< Pointer to source tensor + int64_t src_stride[], ///< Stride vector (of length kRank - 1) + void *device_workspace_ptr = nullptr, ///< Pointer to device workspace + ElementCompute reduction_identity = ElementCompute(), ///< Reduction identity element + ReductionOp reduction_op = ReductionOp(), ///< Reduction operator + cudaStream_t stream = nullptr) { ///< CUDA Stream into which all kernels are launched + + return reduce(dst_ptr, dst_stride, src_ptr, src_stride, device_workspace_ptr, reduction_identity, reduction_op, stream); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace device +} // namespace reduction +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/reduction/device/tensor_reduce_affine_strided.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/reduction/device/tensor_reduce_affine_strided.h new file mode 100644 index 0000000000000000000000000000000000000000..e6139340ab316ac1ca8fe762569935344a73aa25 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/reduction/device/tensor_reduce_affine_strided.h @@ -0,0 +1,361 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Kernel performing a reduction over one or more ranks of an affine tensor +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/array.h" +#include "cutlass/fast_math.h" +#include "cutlass/numeric_types.h" +#include "cutlass/numeric_conversion.h" +#include "cutlass/device_kernel.h" + +#include "cutlass/reduction/kernel/tensor_reduce_affine_strided.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace reduction { +namespace device { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Tensor reduction operator on layouts which are affine +template < + int Rank, ///< Rank of source tensor (e.g. NDHWC => 5) + int ReducedRank, ///< Rank of reduced tensor (includes contiguous, e.g. NC => 2) + typename ElementOutput_, + typename ElementSource_, + typename ReductionOp_, + int VectorLength = 1, + typename ElementCompute_ = ElementOutput_, + int Threads = 256, ///< Number of participating threads + int BatchSize = 4 ///< Number of elements to load per batch +> +struct TensorReductionAffineStrided { + + static int const kRank = Rank; + static int const kReducedRank = ReducedRank; + static int const kVectorLength = VectorLength; + static int const kInnerRank = kRank - kReducedRank; + static int const kThreads = Threads; + static int const kBatchSize = BatchSize; + + using ElementOutput = ElementOutput_; + using ElementSource = ElementSource_; + using ReductionOp = ReductionOp_; + using ElementCompute = ElementCompute_; + + // + // Data members + // + + /// Internal status field + Status status; + + /// Extent of tensor in source layout + Coord extent; + + /// Number of points in the outer index space + int64_t outer_count; + + /// Number of elements in the inner index space + int64_t inner_count; + + /// Number of workspaces needed + int workspace_count; + + /// CUDA Grid shape (.x => contiguous, .y => outer, .z => inner) + dim3 grid_shape; + + /// CUDA Threadblock shape (.x => contiguous, .y => outer, .z => inner) + dim3 threadblock_shape; + + /// CUDA grid shape for the final reduction step if needed + dim3 grid_final; + + /// CUDA threadblock shape for the final reduction step if needed + dim3 threadblock_final; + +private: + // + // Methods + // + + /// Helper to reshape 'count' such that it is less than 2 x 'ext' + static int reshape_pow2(int ext, int count) { + if (ext > count) { + return 1; + } + int x = 1; + for (; count >= ext * 2; ) { + count >>= 1; + x <<= 1; + } + return x; + } + +public: + + /// Default ctor + TensorReductionAffineStrided(): + status(Status::kErrorInvalidProblem), + extent(), + outer_count(0), + inner_count(0), + workspace_count(0), + grid_shape(0, 0, 0), + threadblock_shape(0, 0, 0) { } + + /// Constructor + TensorReductionAffineStrided( + Coord extent_, + int 
target_threadblock_count = 128 + ): + status(Status::kSuccess), + extent(extent_), + outer_count(0), + inner_count(0), + workspace_count(0) { + + // + // Plan the parallel mapping strategy. + // + + outer_count = 1; + inner_count = 1; + + // Compute number of elements in strided ranks + for (int p = 0; p < kReducedRank - 1; ++p) { + outer_count *= extent[p]; + } + + for (int p = 0; p < kInnerRank; ++p) { + inner_count *= extent[kReducedRank + p - 1]; + } + + // Compute plan for the reduction + int extent_c = extent[kRank - 1]; + int vectors_c = (extent_c -1 + kVectorLength) / kVectorLength; + + // Determine CTA shape + int cta_width = kThreads * kVectorLength; + int cta_ways = reshape_pow2(extent_c, cta_width); + int cta_threads_x = kThreads / cta_ways; + + threadblock_shape = dim3(cta_threads_x, 1, std::min(cta_ways, 64)); + + // This leads to an error. + if (threadblock_shape.z > 1) { + if (threadblock_shape.y != 1) { + status = Status::kErrorInternal; + return; + } + } + + // Determine grid shape + int cta_count_x = (vectors_c + cta_threads_x - 1) / cta_threads_x; + int cta_count_y = std::max(1, target_threadblock_count / cta_count_x); + + // Limit the number of CTAs assigned to outer dimension + if (int64_t(cta_count_y * threadblock_shape.y) > outer_count) { + cta_count_y = int(outer_count + threadblock_shape.y - 1) / threadblock_shape.y; + } + + // Limit the number of CTAs assigned to inner dimension + int cta_count_z = std::max(1, target_threadblock_count / cta_count_y); + if (int64_t(cta_count_z * threadblock_shape.z) > inner_count) { + cta_count_z = int(inner_count + threadblock_shape.z - 1) / threadblock_shape.z; + } + + grid_shape = dim3(cta_count_x, cta_count_y, cta_count_z); + workspace_count = (cta_count_z > 1 ? cta_count_z : 0); + + // Determine shape of final reduction kernel if needed + grid_final = dim3(cta_count_x, int(outer_count)); + threadblock_final = dim3(cta_threads_x, 1, 1); + } + + /// Simple check to verify the object is initialized correctly + bool good() const { + return status == Status::kSuccess; + } + + /// Size of one CTA's workspace + int64_t workspace_stride() const { + + // Error condition + if (!good()) { + return 0; + } + + int vector_size_bytes = kVectorLength * sizeof_bits::value / 8; + + return extent[kRank - 1] * vector_size_bytes; + } + + /// Returns the size (in bytes) of a temporary workspace needed for reduction across CTAs + int64_t workspace_size() const { + + // Error condition + if (!good()) { + return 0; + } + + // No reduction across CTAs + if (grid_shape.z == 1) { + return 0; + } + + return workspace_stride() * outer_count * grid_shape.z; + } + + /// Performs a reduction + Status reduce( + ElementOutput *dst_ptr, ///< Pointer to destination tensor + int64_t dst_stride[], ///< Stride vector (of length kReducedRank - 1) + ElementSource const *src_ptr, ///< Pointer to source tensor + int64_t src_stride[], ///< Stride vector (of length kRank - 1) + void *device_workspace_ptr = nullptr, ///< Device workspace + ElementCompute reduction_identity = ElementCompute(), ///< Reduciton identity + ReductionOp reduction_op = ReductionOp(), ///< Reduction operator + cudaStream_t stream = nullptr) { ///< CUDA Stream into which all kernels are launched + + // Initial status check + if (!good()) { + return status; + } + + // Guard against null workspace + if (workspace_count > 1 && device_workspace_ptr == nullptr) { + return Status::kErrorWorkspaceNull; + } + + // Define reduction kernel + using ReductionKernel = kernel::TensorReductionAffineStrided< + 
kRank, + kReducedRank, + ElementOutput, + ElementSource, + ReductionOp, + kVectorLength, + ElementCompute, + kThreads>; + + using FinalReductionKernel = kernel::TensorReductionAffineStridedFinal< + kRank, + kReducedRank, + ElementOutput, + ElementSource, + ReductionOp, + kVectorLength, + ElementCompute, + kThreads>; + + using Params = typename ReductionKernel::Params; + + // Construct the parameters + Params params( + extent, + dst_ptr, + dst_stride, + src_ptr, + src_stride, + static_cast(device_workspace_ptr), + workspace_stride(), + workspace_count, + reduction_op, + reduction_identity); + + // Shared memory size + int shared_mem_bytes = sizeof(typename ReductionKernel::SharedStorage); + + // Launch the kernel + Kernel<<< grid_shape, threadblock_shape, shared_mem_bytes, stream >>>(params); + + // Check error condition + if (cudaPeekAtLastError() == cudaSuccess) { + status = Status::kSuccess; + } + else { + status = Status::kErrorInternal; + } + + // Final reduction kernel + if (workspace_count) { + + Kernel<<< grid_final, threadblock_final, 0, stream >>>(params); + + // Check error condition + if (cudaPeekAtLastError() == cudaSuccess) { + status = Status::kSuccess; + } + else { + status = Status::kErrorInternal; + } + } + + return status; + } + + /// Helper to use overloaded function call operator + Status operator()( + ElementOutput *dst_ptr, ///< Pointer to destination tensor + int64_t dst_stride[], ///< Stride vector (of length kReducedRank - 1) + ElementSource const *src_ptr, ///< Pointer to source tensor + int64_t src_stride[], ///< Stride vector (of length kRank - 1) + void *device_workspace_ptr = nullptr, ///< Pointer to device workspace + ElementCompute reduction_identity = ElementCompute(), ///< Reduciton identity + ReductionOp reduction_op = ReductionOp(), ///< Reduction operator + cudaStream_t stream = nullptr) { ///< CUDA Stream into which all kernels are launched + + return reduce( + dst_ptr, + dst_stride, + src_ptr, + src_stride, + device_workspace_ptr, + reduction_identity, + reduction_op, + stream); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace device +} // namespace reduction +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/reduction/kernel/reduce_softmax_final.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/reduction/kernel/reduce_softmax_final.h new file mode 100644 index 0000000000000000000000000000000000000000..99e8aed7532f69f377ff27337da110f2cd865807 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/reduction/kernel/reduce_softmax_final.h @@ -0,0 +1,267 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Kernel performing a final reduction for softmax +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/numeric_types.h" +#include "cutlass/array.h" +#include "cutlass/functional.h" +#include "cutlass/matrix_shape.h" +#include "cutlass/numeric_conversion.h" +#include "cutlass/arch/memory.h" +#include "cutlass/arch/memory_sm75.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace reduction { +namespace kernel { + +template < + typename ElementNorm_, + typename ElementSum_, + typename ElementSoftmaxCompute_, + typename ThreadblockShape_, + bool GroupedProblem = false +> +class ApplySoftmaxFinalReduction { +public: + + using ElementNorm = ElementNorm_; + using ElementSum = ElementSum_; + using ElementSoftmaxCompute = ElementSoftmaxCompute_; + using ThreadblockShape = ThreadblockShape_; + static const bool isGroupedProblem = GroupedProblem; + + // + // Arguments + // + + struct Arguments { + + cutlass::gemm::GemmCoord* problem_sizes; + cutlass::gemm::GemmCoord problem_size; + ElementNorm* block_Norm; + ElementSum* block_Sum; + int64_t* offset_Norm_Device; + int64_t* offset_Sum_Device; + int64_t batch_stride_Max; + int64_t batch_stride_Sum; + + // + // Methods + // + Arguments() { } + + // Non-grouped constructor without batching + Arguments( + cutlass::gemm::GemmCoord problem_size, + ElementNorm* block_Norm, + ElementSum* block_Sum + ): + problem_size(problem_size), + block_Norm(block_Norm), + block_Sum(block_Sum), + problem_sizes(nullptr), + offset_Norm_Device(nullptr), + offset_Sum_Device(nullptr), + batch_stride_Max(0), + batch_stride_Sum(0) + { + + } + + // Non-grouped constructor with batching + Arguments( + cutlass::gemm::GemmCoord problem_size, + ElementNorm* block_Norm, + ElementSum* block_Sum, + int64_t batch_stride_Max, + int64_t batch_stride_Sum + ): + problem_size(problem_size), + block_Norm(block_Norm), + block_Sum(block_Sum), + batch_stride_Max(batch_stride_Max), + batch_stride_Sum(batch_stride_Sum), + problem_sizes(nullptr), + offset_Norm_Device(nullptr), + offset_Sum_Device(nullptr) + { + + } + + + // Grouped constructor + Arguments( + cutlass::gemm::GemmCoord 
*problem_sizes, + ElementNorm* block_Norm, + ElementSum* block_Sum, + int64_t* offset_Norm_Device, + int64_t* offset_Sum_Device + ): + problem_sizes(problem_sizes), + problem_size(cutlass::gemm::GemmCoord(0, 0, 0)), + block_Norm(block_Norm), + block_Sum(block_Sum), + offset_Norm_Device(offset_Norm_Device), + offset_Sum_Device(offset_Sum_Device) + { + + } + }; + + struct SharedStorage { + + + }; + + // + // Params struct + // + + struct Params { + Arguments args; + + // + // Methods + // + Params() { } + + Params(Arguments const &args_): args(args_) { } + }; + +private: + +public: + + CUTLASS_DEVICE + ApplySoftmaxFinalReduction() { } + + CUTLASS_DEVICE + void operator()(Params const ¶ms, SharedStorage &shared_storage) { + + apply(params, shared_storage); + } + +private: + + /// Full reduction + CUTLASS_DEVICE + void apply(Params const ¶ms, SharedStorage &shared_storage) { + + int tid = threadIdx.x; + int bid = blockIdx.x; + int bdim = blockDim.x; + + int block_batch = blockIdx.z; + + // defining three vars for a general reduction module + cutlass::gemm::GemmCoord problem_size = isGroupedProblem ? params.args.problem_sizes[bid] : params.args.problem_size; + int m_dim_in_loop = isGroupedProblem ? problem_size.m() : tid + bdim; + int access_offset = isGroupedProblem ? 0 : bid * bdim; + + if (!isGroupedProblem && access_offset + tid >= problem_size.m()) return; + + ElementNorm *curr_ptr_Max = isGroupedProblem ? \ + params.args.block_Norm + params.args.offset_Norm_Device[bid] : \ + params.args.block_Norm + block_batch * params.args.batch_stride_Max; + ElementSum *curr_ptr_Sum = isGroupedProblem ? \ + params.args.block_Sum + params.args.offset_Sum_Device[bid] : \ + params.args.block_Sum + block_batch * params.args.batch_stride_Sum; + + int threadblock_num = (problem_size.n() + ThreadblockShape::kN - 1) / ThreadblockShape::kN; + + using ConvertSumOutput = cutlass::NumericConverter; + using ConvertNormOutput = cutlass::NumericConverter; + + using ConvertSum = cutlass::NumericConverter; + using ConvertNorm = cutlass::NumericConverter; + + ConvertSum convert_sum; + ConvertNorm convert_norm; + + ConvertSumOutput convert_sum_output; + ConvertNormOutput convert_norm_output; + + uint32_t float_max_bits = 0xff7fffff; + float min_float = reinterpret_cast(float_max_bits); + + CUTLASS_PRAGMA_UNROLL + for (int idx_m = tid; idx_m < m_dim_in_loop; idx_m += bdim) { + ElementNorm *access_n = curr_ptr_Max + idx_m + access_offset; + ElementSum *access_s = curr_ptr_Sum + idx_m + access_offset; + ElementNorm *access_n_bak = access_n; + ElementSum *access_s_bak = access_s; + ElementSoftmaxCompute max_val = ElementSoftmaxCompute(min_float); + ElementSoftmaxCompute sum_val = ElementSoftmaxCompute(0); + ElementNorm fetch_n; + ElementSum fetch_s; + + CUTLASS_PRAGMA_UNROLL + for (int idx_n = 0; idx_n < threadblock_num; idx_n++) { + cutlass::arch::global_load(fetch_n, access_n, true); + max_val = cutlass::fast_max(max_val, convert_norm(fetch_n)); + access_n += problem_size.m(); + } + + access_n = access_n_bak; + + CUTLASS_PRAGMA_UNROLL + for (int idx_n = 0; idx_n < threadblock_num; idx_n++) { + cutlass::arch::global_load(fetch_n, access_n, true); + cutlass::arch::global_load(fetch_s, access_s, true); + sum_val += convert_sum(fetch_s) * cutlass::fast_exp(convert_norm(fetch_n) - max_val); + access_n += problem_size.m(); + access_s += problem_size.m(); + } + + ElementSoftmaxCompute inv_sum = cutlass::constants::one() / sum_val; + + access_n = access_n_bak; + access_s = access_s_bak; + + access_n[0] = 
convert_norm_output(max_val); + access_s[0] = convert_sum_output(inv_sum); + } + + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace reduction +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/reduction/kernel/reduce_split_k.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/reduction/kernel/reduce_split_k.h new file mode 100644 index 0000000000000000000000000000000000000000..96847e7e3125a7cad4fbdffc552a5707ffc9eba0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/reduction/kernel/reduce_split_k.h @@ -0,0 +1,248 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Kernel performing a reduction over densely packed tensors in global memory +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/tensor_ref.h" +#include "cutlass/numeric_types.h" +#include "cutlass/array.h" +#include "cutlass/functional.h" +#include "cutlass/matrix_shape.h" +#include "cutlass/numeric_conversion.h" + +#include "cutlass/layout/matrix.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace reduction { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename Shape_, ///< shape of CTA (concept: MatrixShape) + typename OutputOp_ , ///< output operator (concept: epilogue::thread operator) + typename ReductionOp_, ///< reduction operator (concept: ReductionOperator) + int PartitionsPerStage = 4 ///< number of partitions to issue +> +class ReduceSplitK { +public: + + using Shape = Shape_; + using ReductionOp = ReductionOp_; + using OutputOp = OutputOp_; + static int const kElementsPerAccess = OutputOp::kCount; + static int const kPartitionsPerStage = PartitionsPerStage; + + using ElementWorkspace = typename ReductionOp::Element; + using ElementAccumulator = typename ReductionOp::ElementAccumulator; + using ElementOutput = typename OutputOp::ElementOutput; + + using WorkspaceTensorRef = TensorRef; + using OutputTensorRef = TensorRef; + using StrideIndex = typename WorkspaceTensorRef::Layout::Stride::Index; + + using FragmentWorkspace = AlignedArray; + using FragmentAccumulator = Array; + using FragmentOutput = AlignedArray; + + // + // Types + // + + /// Params structure + struct Params { + + MatrixCoord problem_size; + int partitions; + size_t partition_stride; + WorkspaceTensorRef workspace; + OutputTensorRef destination; + OutputTensorRef source; + typename OutputOp::Params output; + typename ReductionOp::Params reduction; + + // + // Methods + // + + CUTLASS_HOST_DEVICE + Params() { } + + CUTLASS_HOST_DEVICE + Params( + MatrixCoord problem_size_, + int partitions_, + size_t partition_stride_, + WorkspaceTensorRef workspace_, + OutputTensorRef destination_, + OutputTensorRef source_, + typename OutputOp::Params output_ = typename OutputOp::Params(), + typename ReductionOp::Params reduction_ = typename ReductionOp::Params() + ): + problem_size(problem_size_), + partitions(partitions_), + partition_stride(sizeof(FragmentWorkspace) * partition_stride_ / kElementsPerAccess), + workspace(workspace_), + destination(destination_), + source(source_), + output(output_), + reduction(reduction_) { + + } + }; + + struct SharedStorage { }; + + +public: + + /// Computes the grid size given a chosen threadblock shape + CUTLASS_HOST_DEVICE + static dim3 grid_shape( + cutlass::MatrixCoord problem_size) { + + return dim3( + (problem_size.row() + Shape::kRow - 1) / Shape::kRow, + (problem_size.column() + Shape::kColumn - 1) / Shape::kColumn); + } + + /// Determines the threadblock shape + CUTLASS_HOST_DEVICE + static dim3 block_shape() { + return dim3(Shape::kColumn / kElementsPerAccess, Shape::kRow); + } + + /// Perform a reduction + CUTLASS_DEVICE + void operator()(Params const ¶ms, SharedStorage &storage) { + + // Determine CTA position + MatrixCoord thread_offset( + MatrixCoord::Index(int(blockIdx.x) * Shape::kRow + threadIdx.y), + MatrixCoord::Index(int(blockIdx.y) * Shape::kColumn + threadIdx.x * kElementsPerAccess) + ); + + // One guard conditional + if (!(thread_offset.row() < 
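ReduceSplitK launches a 2D grid of CTAs tiling the output matrix, with each thread covering kElementsPerAccess contiguous columns. A standalone sketch of the same launch-shape arithmetic, using illustrative names rather than the CUTLASS types:

#include <cuda_runtime.h>

// One CTA covers a tile_rows x tile_cols tile of the output; threadIdx.x walks the
// contiguous dimension in vector units and threadIdx.y walks rows.
dim3 reduce_split_k_grid(int rows, int cols, int tile_rows, int tile_cols) {
  return dim3((rows + tile_rows - 1) / tile_rows,
              (cols + tile_cols - 1) / tile_cols);
}

dim3 reduce_split_k_block(int tile_rows, int tile_cols, int elements_per_access) {
  return dim3(tile_cols / elements_per_access, tile_rows);
}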
params.problem_size.row() && + thread_offset.column() < params.problem_size.column())) { + + return; + } + + + ReductionOp reduction_op(params.reduction); + + FragmentAccumulator accumulator; + + accumulator.clear(); + + // + // Load the first slice + // + + char const *workspace_ptr = + reinterpret_cast( + params.workspace.data() + params.workspace.offset(thread_offset)); + + FragmentWorkspace workspace_frag[kPartitionsPerStage]; + + // + // Construct the output operator + // + + OutputOp output_op(params.output); + + // + // Load and accumulate with a simple batched loading sequence. + // + + CUTLASS_PRAGMA_NO_UNROLL + for (int k = 0; k < params.partitions; k += kPartitionsPerStage) { + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kPartitionsPerStage; ++i) { + if (k + i < params.partitions) { + workspace_frag[i] = *reinterpret_cast(workspace_ptr); + workspace_ptr += params.partition_stride; + } + } + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kPartitionsPerStage; ++i) { + if (k + i < params.partitions) { + accumulator = reduction_op(accumulator, workspace_frag[i]); + } + } + } + + // + // Conditionally load the source + // + + FragmentOutput source_frag; + + source_frag.clear(); + + FragmentOutput const *source_ptr = reinterpret_cast( + params.source.data() + params.source.offset(thread_offset)); + + if (output_op.is_source_needed()) { + reinterpret_cast(source_frag) = *source_ptr; + } + + // + // Compute the output + // + + typename OutputOp::FragmentOutput output_frag = output_op(accumulator, source_frag); + + // + // Store + // + + FragmentOutput *dest_ptr = reinterpret_cast( + params.destination.data() + params.destination.offset(thread_offset)); + + *dest_ptr = reinterpret_cast(output_frag); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace reduction +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/reduction/kernel/tensor_reduce_affine_contiguous.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/reduction/kernel/tensor_reduce_affine_contiguous.h new file mode 100644 index 0000000000000000000000000000000000000000..5a0b9f47138362a24443e0ddd0cd6dbb61b684c1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/reduction/kernel/tensor_reduce_affine_contiguous.h @@ -0,0 +1,606 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
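For one output element, ReduceSplitK walks the stacked split-K workspace slices in batches of kPartitionsPerStage, reduces them into an accumulator, and finishes with the output operator, optionally blending a source tensor. A scalar host-side sketch of that flow, with a plain alpha/beta blend standing in for OutputOp (names are assumptions, not the CUTLASS epilogue API):

// 'workspace' holds 'partitions' partial results for this element, spaced
// 'partition_stride' elements apart.
float reduce_split_k_element(float const *workspace,
                             int partitions,
                             long long partition_stride,
                             float alpha, float beta, float source) {
  constexpr int kPartitionsPerStage = 4;
  float accumulator = 0.0f;

  for (int k = 0; k < partitions; k += kPartitionsPerStage) {
    float staged[kPartitionsPerStage];
    // batched loads, guarded against running past the last partition
    for (int i = 0; i < kPartitionsPerStage; ++i) {
      staged[i] = (k + i < partitions) ? workspace[(k + i) * partition_stride] : 0.0f;
    }
    for (int i = 0; i < kPartitionsPerStage; ++i) {
      accumulator += staged[i];
    }
  }
  return alpha * accumulator + beta * source;   // LinearCombination-style epilogue
}

Staging several partitions before accumulating gives the memory system a batch of independent loads in flight, which is the point of kPartitionsPerStage.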
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Kernel performing a reduction over one or more ranks of an affine tensor +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/array.h" +#include "cutlass/fast_math.h" +#include "cutlass/numeric_types.h" +#include "cutlass/numeric_conversion.h" +#include "cutlass/device_kernel.h" + +#include "cutlass/reduction/thread/reduction_operators.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace reduction { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Parameters structure +template < + int Rank, ///< Rank of source tensor (e.g. NDHWC => 5) + int ReducedRank, ///< Rank of reduced tensor (i.e. number of outer ranks) + typename ElementOutput, ///< Data type of output tensor + typename ElementSource, ///< Data type of source tensor + typename ReductionOp, ///< Reduction operator + int VectorLength = 1, ///< Vector length for memory + typename ElementCompute = ElementOutput, ///< Internal compute type - input type of reduction operation + int Threads = 256, ///< Number of participating threads + int BatchSize = 4 ///< Number of elements to load per batch +> +struct TensorReductionAffineContiguousParams { + + static int const kRank = Rank; + static int const kReducedRank = ReducedRank; + static int const kVectorLength = VectorLength; + static int const kInnerRank = kRank - kReducedRank; + static int const kThreads = Threads; + static int const kBatchSize = BatchSize; + + Coord extent; /// Extent of source tensor + FastDivmodU64 divmod[kRank - 1]; /// FastDivmod by each strided rank + int64_t dst_stride[kReducedRank]; /// stride (units of bytes) - I, J + int64_t src_stride[kRank - 1]; /// stride (units of bytes) - I, J, K + int64_t workspace_stride; /// stride (units of bytes) between workspace + int workspace_count; /// number of workspaces + + uint64_t inner_count; /// Number of elements in reduced index space + uint64_t outer_count; /// Number of elements in outer index space + + ElementOutput * destination; /// Pointer to output tensor of rank kReducedRank + ElementSource const * source; /// Pointer to source pointer of rank kRank + ReductionOp reduction_op; /// Reduction operator + ElementCompute reduction_identity; /// Identity element used by reduction operator + ElementCompute *device_workspace; /// Pointer to device workspace for inter-CTA reductions + + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + TensorReductionAffineContiguousParams() { + + } + + /// Ctor + 
TensorReductionAffineContiguousParams( + Coord extent_, ///< Extent of source tensor + ElementOutput * dst_ptr_, ///< Output tensor data + int64_t dst_stride_[], ///< Stride (units of elements) + ElementSource const * src_ptr_, ///< Source tensor data + int64_t src_stride_[], ///< Stride (units of elements) + ElementCompute *device_workspace_, ///< Pointer to device workspace for inter-CTA reductions + int64_t workspace_stride_, ///< Stride between workspaces + int workspace_count_, ///< Number of workspaces + ReductionOp reduction_op_, ///< Reduction operator + ElementCompute reduction_identity_ = ElementCompute() ///< Identity element used by reduction operator + ): + extent(extent_), + inner_count(1), + outer_count(1), + destination(dst_ptr_), + source(src_ptr_), + device_workspace(device_workspace_), + workspace_stride(workspace_stride_), + workspace_count(workspace_count_), + reduction_op(reduction_op_), + reduction_identity(reduction_identity_) { + + // Initialize divisors for fast div-mod + for (int p = 1; p < kRank; ++p) { + divmod[p - 1] = FastDivmodU64(uint64_t(extent[p])); + } + + int input_size_bits = sizeof_bits::value; + int output_size_bits = sizeof_bits::value; + + // Compute strides in units of bytes + for (int p = 0; p < kReducedRank; ++p) { + dst_stride[p] = dst_stride_[p] * output_size_bits / 8; + } + + for (int p = 0; p < kRank - 1; ++p) { + src_stride[p] = src_stride_[p] * input_size_bits / 8; + } + + // Compute number of elements in strided ranks + for (int p = 0; p < kReducedRank; ++p) { + outer_count *= uint64_t(extent[p]); + } + + for (int p = 0; p < kInnerRank; ++p) { + inner_count *= uint64_t(extent[kRank - 1 - p]); + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Kernel to reduce a tensor with affine layout over a set of ranks *INCLUDING* the contiguous +/// rank. This leads to favorable vectorized memory accesses over the contiguous rank. +template < + int Rank, ///< Rank of source tensor (e.g. NDHWC => 5) + int ReducedRank, ///< Rank of reduced tensor (includes contiguous, e.g. 
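The params constructor converts element strides into byte strides and splits the rank-kRank extent into an outer (kept) index space and an inner (reduced) index space that includes the contiguous rank. A small sketch of that bookkeeping for a rank-4 NHWC tensor reduced over H, W, C; the rank, layout, and names are assumptions for illustration:

#include <cstdint>

struct ReductionCounts {
  uint64_t outer_count;            // product of kept extents (here: N)
  uint64_t inner_count;            // product of reduced extents (here: H * W * C)
  int64_t  src_stride_bytes[3];    // byte strides of the three strided ranks (N, H, W)
};

ReductionCounts make_counts_nhwc(int N, int H, int W, int C,
                                 int64_t const elem_stride[3],   // element strides of N, H, W
                                 int element_size_bytes) {
  ReductionCounts c{};
  c.outer_count = uint64_t(N);                  // kReducedRank = 1 kept rank
  c.inner_count = uint64_t(H) * W * C;          // kInnerRank = 3 reduced ranks, contiguous C included
  for (int p = 0; p < 3; ++p) {
    c.src_stride_bytes[p] = elem_stride[p] * element_size_bytes;
  }
  return c;
}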
NC => 2) + typename ElementOutput, ///< Data type of output tensor + typename ElementSource, ///< Data type of source tensor + typename ReductionOp, ///< Reduction operator + int VectorLength = 1, ///< Vector length for memory + typename ElementCompute = ElementOutput, ///< Internal compute type - input type of reduction operation + int Threads = 256, ///< Number of participating threads + int BatchSize = 4 ///< Number of elements to load per batch +> +class TensorReductionAffineContiguous { +public: + + static int const kRank = Rank; + static int const kReducedRank = ReducedRank; + static int const kVectorLength = VectorLength; + static int const kInnerRank = kRank - kReducedRank; + static int const kThreads = Threads; + static int const kBatchSize = BatchSize; + using ComputeFragment = Array; + using SourceFragment = AlignedArray; + using OutputFragment = AlignedArray; + + /// Shared memory allocation used for reduction within the CTA + struct SharedStorage { + Array workspace; + }; + + /// Parameters structure + using Params = TensorReductionAffineContiguousParams< + Rank, + ReducedRank, + ElementOutput, + ElementSource, + ReductionOp, + VectorLength, + ElementCompute, + Threads, + BatchSize + >; + +private: + + /// Computes the coordinate and offset of a given linear index + CUTLASS_DEVICE + void compute_inner_coord_and_offset_( + Params const ¶ms, + Coord & coord, + int64_t &src_offset, + uint64_t linear_idx) const { + + // Decompose into a coordinate of rank + coord = CoordinateDecomposition(linear_idx, ¶ms.divmod[kRank - kInnerRank]); + + // Compute an offset using the souce stride + src_offset = 0; + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kInnerRank - 1; ++i) { + src_offset += coord[i] * params.src_stride[kReducedRank + i]; + } + src_offset += coord[kInnerRank - 1] * sizeof_bits::value / 8; + } + + /// Computes the coordinate and offset of a given linear index + CUTLASS_DEVICE + void compute_outer_coord_and_offset_( + Params const ¶ms, + Coord & coord, + int64_t &dst_offset, + int64_t &src_offset, + uint64_t linear_idx) const { + + // Decompose into coordinate of rank + coord = CoordinateDecomposition(linear_idx, params.divmod); + + // Compute offsets using destination and source strides + dst_offset = 0; + src_offset = 0; + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kReducedRank; ++i) { + dst_offset += params.dst_stride[i] * coord[i]; + src_offset += params.src_stride[i] * coord[i]; + } + } + + /// Reduces over the reduction indices yielding a single element + CUTLASS_DEVICE + ElementCompute reduce_indices_( + Params const ¶ms, + ElementCompute *threadblock_workspace, + char const *src_byte_ptr, + int coord_c) { + + NumericArrayConverter convert_source; + ReductionOp reduction_op(params.reduction_op); + + // + // Early exit or initialize to identity element + // + if (!params.inner_count) { + return params.reduction_identity; + } + + ComputeFragment accumulator; + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < accumulator.size(); ++i) { + accumulator[i] = params.reduction_identity; + } + + // Compute the coordinate of the first access + int64_t src_byte_offset = 0; + Coord coord; + + uint64_t linear_idx = (threadIdx.x + blockDim.x * threadIdx.z + blockDim.x * blockIdx.z * blockDim.z) * kVectorLength; + compute_inner_coord_and_offset_(params, coord, src_byte_offset, linear_idx); + + // Load the first vector + SourceFragment source_fragment[kBatchSize]; + + bool not_done = true; + + // Iterate over vectors in a linearized reduction index space + while (not_done) { + + bool 
guards[kBatchSize]; + + // Issue a batch of loads + CUTLASS_PRAGMA_UNROLL + for (int b = 0; b < kBatchSize; ++b) { + + if (linear_idx < params.inner_count) { + source_fragment[b] = *reinterpret_cast(src_byte_ptr + src_byte_offset); + guards[b] = true; + } + else { + guards[b] = false; + not_done = false; + } + + linear_idx += (blockDim.z * gridDim.z * blockDim.x) * kVectorLength; + compute_inner_coord_and_offset_(params, coord, src_byte_offset, linear_idx); + } + + // Perform a batch of reduction operations + CUTLASS_PRAGMA_UNROLL + for (int b = 0; b < kBatchSize; ++b) { + if (guards[b]) { + auto cvt = convert_source(source_fragment[b]); + + accumulator = cutlass::reduction::thread::detail::ApplyArrayOperator( + reduction_op, + accumulator, + cvt); + } + } + }; + + // + // Reduction of vectors to scalar + // + + ElementCompute reduced_accumulator = accumulator[0]; + + CUTLASS_PRAGMA_UNROLL + for (int i = 1; i < kVectorLength; ++i) { + reduced_accumulator = reduction_op(reduced_accumulator, accumulator[i]); + } + + // + // Reduction within CTA across threadIdx.xz => threadIdx{.x = 0, .z = 0} + // + // This re-arranges data so threadIdx.y is effectively a row index and threadIdx.xz is a column + // + + int thread_count = blockDim.x * blockDim.z; + int thread_j = threadIdx.x + blockDim.x * threadIdx.z; + int thread_i = threadIdx.y; + + ElementCompute *frag_ptr = reinterpret_cast(threadblock_workspace) + thread_i * thread_count; + + frag_ptr[thread_j] = reduced_accumulator; + + // + // Reduce + // + CUTLASS_PRAGMA_NO_UNROLL + while (thread_count > 1) { + thread_count /= 2; + + __syncthreads(); + + if (thread_j < thread_count) { + ElementCompute other = frag_ptr[thread_j + thread_count]; + + reduced_accumulator = reduction_op(reduced_accumulator, other); + + frag_ptr[thread_j] = reduced_accumulator; + } + + __syncthreads(); + } + + + return reduced_accumulator; + } + +public: + + /// Perform a reduction + CUTLASS_DEVICE + void operator()(Params const ¶ms, SharedStorage &shared_storage) { + + int coord_c = (blockIdx.x * blockDim.x + threadIdx.x) * kVectorLength; + + char const * src_byte_ptr = reinterpret_cast(params.source); + char * dst_byte_ptr = nullptr; + + // If performing a reduction across CTAs, redirect output to device workspace + if (gridDim.z == 1) { + dst_byte_ptr = reinterpret_cast(params.destination); + } + else { + dst_byte_ptr = reinterpret_cast(params.device_workspace); + } + + uint64_t idx_linear = blockIdx.y * blockDim.y + threadIdx.y; + + // Use modulo division to compute location + Coord outer_coord; + int64_t dst_byte_offset; + int64_t src_byte_offset; + + compute_outer_coord_and_offset_( + params, + outer_coord, + dst_byte_offset, + src_byte_offset, + idx_linear); + + if (gridDim.z == 1) { + + /// Complete the reduction with no workspace + while (idx_linear < params.outer_count) { + + ElementCompute result = reduce_indices_( + params, + shared_storage.workspace.data(), + src_byte_ptr + src_byte_offset, + coord_c); + + // Store the result after possible final reduction within the CTA + if (threadIdx.z == 0 && threadIdx.x == 0) { + + // Convert to output type and store + NumericConverter convert_output; + ElementOutput cvt = convert_output(result); + + *reinterpret_cast(dst_byte_ptr + dst_byte_offset) = cvt; + } + + __syncthreads(); + + // Update indices and pointers + idx_linear += gridDim.y * blockDim.y; + + compute_outer_coord_and_offset_( + params, + outer_coord, + dst_byte_offset, + src_byte_offset, + idx_linear); + + } // while + } + else { + + /// Complete the 
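Once each thread holds a private partial, the kernel stages it in shared memory and halves the active thread count until one value remains, as in the `while (thread_count > 1)` loop above. A generic CUDA device sketch of that power-of-two tree reduction, assuming blockDim.x is a power of two (not the CUTLASS code itself):

#include <cuda_runtime.h>

// Reduce one float per thread across threadIdx.x using shared memory.
// 'workspace' must hold at least blockDim.x floats.
__device__ float block_tree_reduce(float partial, float *workspace) {
  int tid = threadIdx.x;
  int count = blockDim.x;

  workspace[tid] = partial;
  __syncthreads();

  while (count > 1) {
    count /= 2;
    if (tid < count) {
      workspace[tid] = workspace[tid] + workspace[tid + count];   // fold upper half into lower half
    }
    __syncthreads();
  }
  return workspace[0];   // valid in all threads after the final barrier
}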
reduction with workspace + while (idx_linear < params.outer_count) { + + ElementCompute result = reduce_indices_( + params, + shared_storage.workspace.data(), + src_byte_ptr + src_byte_offset, + coord_c); + + int64_t byte_offset = + blockIdx.z * params.workspace_stride + idx_linear * sizeof_bits::value / 8; + + // Store the result for final reduction + if (threadIdx.z == 0 && threadIdx.x == 0) { + *reinterpret_cast(dst_byte_ptr + byte_offset) = result; + } + + __syncthreads(); + + // Update indices and pointers + idx_linear += gridDim.y * blockDim.y; + + compute_outer_coord_and_offset_( + params, + outer_coord, + dst_byte_offset, + src_byte_offset, + idx_linear); + } // while + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Kernel to perform final reduction +template < + int Rank, ///< Rank of source tensor (e.g. NDHWC => 5) + int ReducedRank, ///< Rank of reduced tensor (includes contiguous, e.g. NC => 2) + typename ElementOutput, ///< Data type of output tensor + typename ElementSource, ///< Data type of source tensor + typename ReductionOp, ///< Reduction operator + int VectorLength = 1, ///< Vector length for memory + typename ElementCompute = ElementOutput, ///< Internal compute type - input type of reduction operation + int Threads = 256, ///< Number of participating threads + int BatchSize = 4 ///< Number of elements to load per batch +> +class TensorReductionAffineContiguousFinal { +public: + + static int const kRank = Rank; + static int const kReducedRank = ReducedRank; + static int const kVectorLength = VectorLength; + static int const kInnerRank = kRank - kReducedRank; + static int const kThreads = Threads; + static int const kBatchSize = BatchSize; + + /// Shared memory + struct SharedStorage { }; + + /// Parameters structure + using Params = TensorReductionAffineContiguousParams< + Rank, + ReducedRank, + ElementOutput, + ElementSource, + ReductionOp, + VectorLength, + ElementCompute, + Threads, + BatchSize + >; + +private: + + /// Computes the coordinate and offset of a given linear index + CUTLASS_DEVICE + void compute_outer_coord_and_offset_( + Params const ¶ms, + Coord & coord, + int64_t &dst_offset, + uint64_t linear_idx) const { + + // Decompose into coordinate of rank + coord = CoordinateDecomposition(linear_idx, params.divmod); + + // Compute offsets using destination and source strides + dst_offset = 0; + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kReducedRank; ++i) { + dst_offset += params.dst_stride[i] * coord[i]; + } + } + + /// Reduces over the reduction indices + CUTLASS_DEVICE + ElementCompute reduce_indices_( + Params const ¶ms, + ElementCompute const *device_workspace) { + + ReductionOp reduction_op(params.reduction_op); + char const *src_byte_ptr = reinterpret_cast(device_workspace); + + // Accumulated output + ElementCompute accumulator = params.reduction_identity; + + for (int iter = 0; iter < params.workspace_count; ++iter) { + ElementCompute workspace_item = *reinterpret_cast(src_byte_ptr); + + accumulator = reduction_op(accumulator, workspace_item); + + src_byte_ptr += params.workspace_stride; + } + + return accumulator; + } + +public: + + // + // Methods + // + + /// Perform a reduction + CUTLASS_DEVICE + void operator()(Params const ¶ms, SharedStorage &shared_storage) { + + uint64_t idx_linear = blockIdx.x * blockDim.x + threadIdx.x; + + char * dst_byte_ptr = reinterpret_cast(params.destination); + + // Use modulo division to compute location + Coord outer_coord; + int64_t 
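When gridDim.z > 1, each z-slice writes an ElementCompute partial into the device workspace and the *Final kernel folds the workspace_count partials per output element. A host-side sketch of the workspace sizing and the final fold, assuming a [workspace_count][outer_count] layout and '+' as the reduction operator (illustrative, not the exact CUTLASS layout guarantees):

#include <cstddef>
#include <cstdint>

// Phase 1 writes one partial per (output element, z-slice); phase 2 folds the slices.
size_t workspace_bytes(uint64_t outer_count, int workspace_count, size_t compute_size) {
  return size_t(outer_count) * workspace_count * compute_size;
}

// Final fold for one output element, mirroring the loop in
// TensorReductionAffineContiguousFinal::reduce_indices_.
float fold_workspace_element(float const *workspace, uint64_t outer_count,
                             int workspace_count, uint64_t element_idx, float identity) {
  float accumulator = identity;
  for (int w = 0; w < workspace_count; ++w) {
    accumulator += workspace[uint64_t(w) * outer_count + element_idx];
  }
  return accumulator;
}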
dst_byte_offset; + + compute_outer_coord_and_offset_( + params, + outer_coord, + dst_byte_offset, + idx_linear); + + /// Complete the reduction + while (idx_linear < params.outer_count) { + + ElementCompute result = reduce_indices_(params, params.device_workspace + idx_linear); + + // Convert to output type and store + NumericConverter convert_output; + + *reinterpret_cast(dst_byte_ptr + dst_byte_offset) = convert_output(result); + + // Update indices and pointers + idx_linear += gridDim.x * blockDim.x; + + compute_outer_coord_and_offset_( + params, + outer_coord, + dst_byte_offset, + idx_linear); + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace reduction +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/reduction/kernel/tensor_reduce_affine_strided.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/reduction/kernel/tensor_reduce_affine_strided.h new file mode 100644 index 0000000000000000000000000000000000000000..574c836d8fb3520a6598f342f95406d85001004c --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/reduction/kernel/tensor_reduce_affine_strided.h @@ -0,0 +1,641 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Kernel performing a reduction over one or more ranks of an affine tensor +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/array.h" +#include "cutlass/fast_math.h" +#include "cutlass/numeric_types.h" +#include "cutlass/numeric_conversion.h" +#include "cutlass/device_kernel.h" + +#include "cutlass/reduction/thread/reduction_operators.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace reduction { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace kernel { + +/// Parameters structure +template < + int Rank, ///< Rank of source tensor (e.g. NDHWC => 5) + int ReducedRank, ///< Rank of reduced tensor (includes contiguous, e.g. NC => 2) + typename ElementOutput, ///< Data type of output tensor + typename ElementSource, ///< Data type of source tensor + typename ReductionOp, ///< Reduction operator + int VectorLength = 1, ///< Vector length for memory + typename ElementCompute = ElementOutput, ///< Internal compute type - input type of reduction operation + int Threads = 256, ///< Number of participating threads + int BatchSize = 4 ///< Number of elements to load per batch +> +struct TensorReductionAffineStridedParams { + + static int const kRank = Rank; + static int const kReducedRank = ReducedRank; + static int const kVectorLength = VectorLength; + static int const kInnerRank = kRank - kReducedRank; + static int const kThreads = Threads; + static int const kBatchSize = BatchSize; + + Coord extent; /// Extent of source tensor + FastDivmodU64 divmod[kRank - 1]; /// FastDivmod by each strided rank + int64_t dst_stride[kReducedRank - 1]; /// stride (units of bytes) - I, J + int64_t src_stride[kRank - 1]; /// stride (units of bytes) - I, J, K + int64_t workspace_stride; /// stride (units of bytes) between workspace + int64_t workspace_outer_stride; /// stride (units of bytes) between 'rows' of the workspace + int workspace_count; /// number of workspaces + + uint64_t inner_count; /// Number of elements in reduced index space + uint64_t outer_count; /// Number of elements in outer index space + + ElementOutput * destination; /// Pointer to output tensor of rank kReducedRank + ElementSource const * source; /// Pointer to source pointer of rank kRank + ReductionOp reduction_op; /// Reduction operator + ElementCompute reduction_identity; /// Identity element for reduction operator + ElementCompute *device_workspace; /// Pointer to device workspace for inter-CTA reductions + + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + TensorReductionAffineStridedParams() { + + } + + /// Ctor + TensorReductionAffineStridedParams( + Coord extent_, ///< Extent of source tensor + ElementOutput * dst_ptr_, ///< Output tensor data + int64_t dst_stride_[], ///< Stride (units of elements) + ElementSource const * src_ptr_, ///< Source tensor data + int64_t src_stride_[], ///< Stride (units of elements) + ElementCompute *device_workspace_, ///< Pointer to device workspace for inter-CTA reductions + int64_t workspace_stride_, ///< Stride between workspaces + int workspace_count_, ///< Number of workspaces + ReductionOp reduction_op_, ///< Reduction operator + ElementCompute reduction_identity_ = ElementCompute() ///< Identity element for reduction operator + ): + extent(extent_), + inner_count(1), + outer_count(1), + destination(dst_ptr_), + source(src_ptr_), + device_workspace(device_workspace_), + workspace_outer_stride(0), + 
workspace_stride(workspace_stride_), + workspace_count(workspace_count_), + reduction_op(reduction_op_), + reduction_identity(reduction_identity_) { + + // Initialize divisors for fast div-mod + for (int p = 1; p < kRank; ++p) { + divmod[p - 1] = FastDivmodU64(uint64_t(extent[p])); + } + + int input_size_bits = sizeof_bits::value; + int output_size_bits = sizeof_bits::value; + + workspace_outer_stride = workspace_stride * workspace_count; + + // Compute strides in units of bytes + for (int p = 0; p < kReducedRank - 1; ++p) { + dst_stride[p] = dst_stride_[p] * output_size_bits / 8; + } + + for (int p = 0; p < kRank - 1; ++p) { + src_stride[p] = src_stride_[p] * input_size_bits / 8; + } + + // Compute number of elements in strided ranks + for (int p = 0; p < kReducedRank - 1; ++p) { + outer_count *= uint64_t(extent[p]); + } + + for (int p = 0; p < kInnerRank; ++p) { + inner_count *= uint64_t(extent[kReducedRank + p - 1]); + } + } +}; + +/// Kernel to reduce a tensor with affine layout over a set of ranks *EXCLUDING* the contiguous +/// rank. This leads to favorable vectorized memory accesses over the contiguous rank. +template < + int Rank, ///< Rank of source tensor (e.g. NDHWC => 5) + int ReducedRank, ///< Rank of reduced tensor (includes contiguous, e.g. NC => 2) + typename ElementOutput, ///< Data type of output tensor + typename ElementSource, ///< Data type of source tensor + typename ReductionOp, ///< Reduction operator + int VectorLength = 1, ///< Vector length for memory + typename ElementCompute = ElementOutput, ///< Internal compute type - input type of reduction operation + int Threads = 256, ///< Number of participating threads + int BatchSize = 4 ///< Number of elements to load per batch +> +class TensorReductionAffineStrided { +public: + + static int const kRank = Rank; + static int const kReducedRank = ReducedRank; + static int const kVectorLength = VectorLength; + static int const kInnerRank = kRank - kReducedRank; + static int const kThreads = Threads; + static int const kBatchSize = BatchSize; + using ComputeFragment = Array; + using SourceFragment = AlignedArray; + using OutputFragment = AlignedArray; + + /// Shared memory allocation used for reduction within the CTA + struct SharedStorage { + Array workspace; + }; + + /// Parameters structure + using Params = TensorReductionAffineStridedParams< + Rank, + ReducedRank, + ElementOutput, + ElementSource, + ReductionOp, + VectorLength, + ElementCompute, + Threads, + BatchSize + >; + +private: + + /// Computes the coordinate and offset of a given linear index + CUTLASS_DEVICE + void compute_inner_coord_and_offset_( + Params const ¶ms, + Coord & coord, + int64_t &src_offset, + uint64_t linear_idx) const { + + // Decompose into coordinate + coord = CoordinateDecomposition(linear_idx, ¶ms.divmod[kReducedRank - 1]); + + // Compute linear offset + src_offset = 0; + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kInnerRank; ++i) { + src_offset += params.src_stride[kReducedRank + i - 1] * coord[i]; + } + } + + /// Computes the coordinate and offset of a given linear index + CUTLASS_DEVICE + void compute_outer_coord_and_offset_( + Params const ¶ms, + Coord & coord, + int64_t &dst_offset, + int64_t &src_offset, + uint64_t linear_idx) const { + + // Decompose linear coordinate + coord = CoordinateDecomposition(linear_idx, params.divmod); + + // Compute offset into tensors + dst_offset = 0; + src_offset = 0; + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kReducedRank - 1; ++i) { + dst_offset += params.dst_stride[i] * coord[i]; + 
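Both kernels map a linear index in the reduced or outer space back to a multi-rank coordinate via precomputed FastDivmodU64 divisors, then turn that coordinate into a byte offset with the per-rank strides. A plain-division sketch of the same decomposition (FastDivmodU64 only accelerates the / and % with precomputed magic numbers):

#include <cstdint>

// Decompose 'linear_idx' into 'rank' coordinates given per-rank extents
// (innermost rank last), then form a byte offset using per-rank byte strides.
void decompose_and_offset(uint64_t linear_idx, int rank,
                          uint64_t const extent[], int64_t const stride_bytes[],
                          uint64_t coord[], int64_t &byte_offset) {
  for (int r = rank - 1; r >= 0; --r) {       // peel innermost ranks first
    coord[r] = linear_idx % extent[r];
    linear_idx /= extent[r];
  }
  byte_offset = 0;
  for (int r = 0; r < rank; ++r) {
    byte_offset += int64_t(coord[r]) * stride_bytes[r];
  }
}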
src_offset += params.src_stride[i] * coord[i]; + } + } + + /// Reduces over the reduction indices + CUTLASS_DEVICE + ComputeFragment reduce_indices_( + Params const ¶ms, + ElementCompute *threadblock_workspace, + char const *src_byte_ptr) { + + NumericArrayConverter convert_source; + ReductionOp reduction_op(params.reduction_op); + + // Accumulated output + ComputeFragment identity_frag; + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < identity_frag.size(); ++i) { + identity_frag[i] = params.reduction_identity; + } + + if (!params.inner_count) { + return identity_frag; + } + + ComputeFragment accumulator = identity_frag; + + // Compute the coordinate of the first access + int64_t src_byte_offset = 0; + Coord coord; + + uint64_t linear_idx = threadIdx.z + blockIdx.z * blockDim.z; + compute_inner_coord_and_offset_(params, coord, src_byte_offset, linear_idx); + + // Load the first vector + SourceFragment source_fragment[kBatchSize]; + + bool not_done = true; + + // Iterate over vectors in a linearized reduction index space + while (not_done) { + + bool guards[kBatchSize]; + + // Issue a batch of loads + CUTLASS_PRAGMA_UNROLL + for (int b = 0; b < kBatchSize; ++b) { + + if (linear_idx < params.inner_count) { + source_fragment[b] = *reinterpret_cast(src_byte_ptr + src_byte_offset); + guards[b] = true; + } + else { + guards[b] = false; + not_done = false; + } + + linear_idx += blockDim.z * gridDim.z; + compute_inner_coord_and_offset_(params, coord, src_byte_offset, linear_idx); + } + + // Perform a batch of reduction operations + CUTLASS_PRAGMA_UNROLL + for (int b = 0; b < kBatchSize; ++b) { + if (guards[b]) { + + auto cvt = convert_source(source_fragment[b]); + + accumulator = cutlass::reduction::thread::detail::ApplyArrayOperator( + reduction_op, + accumulator, + cvt); + } + } + }; + + // Optional reduction within a CTA + if (blockDim.z > 1) { + + // Linearized thread ID + int thread_idx = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z); + + // all threads store to workspace + ComputeFragment *frag_ptr = reinterpret_cast(threadblock_workspace); + + frag_ptr[thread_idx] = accumulator; + + __syncthreads(); + + if (threadIdx.z == 0) { + // Load all additional block indices + for (int z = 1; z < blockDim.z; ++z) { + ComputeFragment frag = frag_ptr[thread_idx + z * blockDim.x * blockDim.y]; + + accumulator = cutlass::reduction::thread::detail::ApplyArrayOperator( + reduction_op, + accumulator, + frag); + } + } + + __syncthreads(); + } + + return accumulator; + } + +public: + + /// Perform a reduction + CUTLASS_DEVICE + void operator()(Params const ¶ms, SharedStorage &shared_storage) { + + int coord_c = (blockIdx.x * blockDim.x + threadIdx.x) * kVectorLength; + + char const * src_byte_ptr = reinterpret_cast(params.source + coord_c); + char * dst_byte_ptr = nullptr; + + // If performing a reduction across CTAs, redirect output to device workspace + if (gridDim.z == 1) { + dst_byte_ptr = reinterpret_cast(params.destination + coord_c); + } + else { + dst_byte_ptr = reinterpret_cast(params.device_workspace + coord_c); + } + + // If the C index is out of bounds, exit + if (coord_c >= params.extent[kRank - 1]) { + return; + } + + int64_t idx_linear = blockIdx.y * blockDim.y + threadIdx.y; + + // Use modulo division to compute location + Coord outer_coord; + int64_t dst_byte_offset; + int64_t src_byte_offset; + + compute_outer_coord_and_offset_( + params, + outer_coord, + dst_byte_offset, + src_byte_offset, + idx_linear); + + if (gridDim.z == 1) { + + /// Complete the reduction with no 
workspace + while (idx_linear < params.outer_count) { + + ComputeFragment result; + + result = reduce_indices_( + params, + shared_storage.workspace.data(), + src_byte_ptr + src_byte_offset); + + // Store the result after possible final reduction within the CTA + if (threadIdx.z == 0) { + + // Convert to output type and store + NumericArrayConverter convert_output; + auto cvt = convert_output(result); + + *reinterpret_cast(dst_byte_ptr + dst_byte_offset) = + reinterpret_cast(cvt); + } + + // Update indices and pointers + idx_linear += gridDim.y * blockDim.y; + + compute_outer_coord_and_offset_( + params, + outer_coord, + dst_byte_offset, + src_byte_offset, + idx_linear); + + } // while + } + else { + + /// Complete the reduction with a device workspace + while (idx_linear < params.outer_count) { + + ComputeFragment result; + + result = reduce_indices_( + params, + shared_storage.workspace.data(), + src_byte_ptr + src_byte_offset); + + // Store the result after possible final reduction within the CTA + if (threadIdx.z == 0) { + + int64_t byte_offset = + blockIdx.z * params.workspace_stride + idx_linear * params.workspace_outer_stride; + + // No conversion - store in compute type + *reinterpret_cast(dst_byte_ptr + byte_offset) = + reinterpret_cast(result); + } + + // Update indices and pointers + idx_linear += gridDim.y * blockDim.y; + + compute_outer_coord_and_offset_( + params, + outer_coord, + dst_byte_offset, + src_byte_offset, + idx_linear); + + } // while (outer index) + } // if () + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Kernel to perform final reduction +template < + int Rank, ///< Rank of source tensor (e.g. NDHWC => 5) + int ReducedRank, ///< Rank of reduced tensor (includes contiguous, e.g. 
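In the strided variant each thread keeps a whole ComputeFragment of kVectorLength lanes: loads along the contiguous C rank stay vectorized while the loop walks the reduced, strided ranks and folds element-wise into the fragment. A plain C++ sketch of that lane-wise accumulation with guarded batch loads; the offset callback stands in for compute_inner_coord_and_offset_ and all names are illustrative:

#include <cstdint>

// Accumulate 'inner_count' strided vectors of VEC contiguous elements into 'acc'.
template <int VEC>
void accumulate_strided(float acc[VEC], char const *src_base, uint64_t inner_count,
                        int64_t (*next_offset)(uint64_t)) {
  constexpr int kBatchSize = 4;
  for (uint64_t idx = 0; idx < inner_count; idx += kBatchSize) {
    float staged[kBatchSize][VEC];
    bool guard[kBatchSize];
    for (int b = 0; b < kBatchSize; ++b) {            // batched, guarded loads
      guard[b] = (idx + b) < inner_count;
      if (guard[b]) {
        float const *src = reinterpret_cast<float const *>(src_base + next_offset(idx + b));
        for (int v = 0; v < VEC; ++v) staged[b][v] = src[v];
      }
    }
    for (int b = 0; b < kBatchSize; ++b) {            // lane-wise reduction
      if (guard[b]) {
        for (int v = 0; v < VEC; ++v) acc[v] += staged[b][v];
      }
    }
  }
}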
NC => 2) + typename ElementOutput, ///< Data type of output tensor + typename ElementSource, ///< Data type of source tensor + typename ReductionOp, ///< Reduction operator + int VectorLength = 1, ///< Vector length for memory + typename ElementCompute = ElementOutput, ///< Internal compute type - input type of reduction operation + int Threads = 256, ///< Number of participating threads + int BatchSize = 4 ///< Number of elements to load per batch +> +class TensorReductionAffineStridedFinal { +public: + + static int const kRank = Rank; + static int const kReducedRank = ReducedRank; + static int const kVectorLength = VectorLength; + static int const kInnerRank = kRank - kReducedRank; + static int const kThreads = Threads; + static int const kBatchSize = BatchSize; + using ComputeFragment = Array; + using SourceFragment = AlignedArray; + using OutputFragment = AlignedArray; + + /// Shared memory + struct SharedStorage { }; + + /// Parameters structure + using Params = TensorReductionAffineStridedParams< + Rank, + ReducedRank, + ElementOutput, + ElementSource, + ReductionOp, + VectorLength, + ElementCompute, + Threads, + BatchSize + >; + +private: + + /// Computes the coordinate and offset of a given linear index + CUTLASS_DEVICE + void compute_outer_coord_and_offset_( + Params const ¶ms, + Coord & coord, + int64_t &dst_offset, + uint64_t linear_idx) const { + + // Decompose linear index + coord = CoordinateDecomposition(linear_idx, params.divmod); + + // Compute tensor offset + dst_offset = 0; + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kReducedRank - 1; ++i) { + dst_offset += params.dst_stride[i] * coord[i]; + } + } + + /// Reduces over the reduction indices + CUTLASS_DEVICE + ComputeFragment reduce_indices_( + Params const ¶ms, + char *src_byte_ptr) { + + ReductionOp reduction_op(params.reduction_op); + + // Accumulated output + ComputeFragment identity_frag; + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < identity_frag.size(); ++i) { + identity_frag[i] = params.reduction_identity; + } + + ComputeFragment accumulator = identity_frag; + ComputeFragment workspace_fragments[kBatchSize]; + + // Partially unrolled loop + for (int idx = 0; idx < params.workspace_count; idx += kBatchSize) { + + // Issue a batch of loads + CUTLASS_PRAGMA_UNROLL + for (int b = 0; b < kBatchSize; ++b) { + if (idx + b < params.workspace_count) { + workspace_fragments[b] = + *reinterpret_cast(src_byte_ptr); + } + else { + workspace_fragments[b] = identity_frag; + } + src_byte_ptr += + params.workspace_stride; + } + + // Perform a reduction + CUTLASS_PRAGMA_UNROLL + for (int b = 0; b < kBatchSize; ++b) { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kVectorLength; ++i) { + accumulator[i] = reduction_op(accumulator[i], workspace_fragments[b][i]); + } + } + } + + return accumulator; + } + +public: + + // + // Methods + // + + /// Perform a reduction + CUTLASS_DEVICE + void operator()(Params const ¶ms, SharedStorage &shared_storage) { + + int coord_c = (blockIdx.x * blockDim.x + threadIdx.x) * kVectorLength; + + char * src_byte_ptr = reinterpret_cast(params.device_workspace + coord_c); + char * dst_byte_ptr = reinterpret_cast(params.destination + coord_c); + + // If the C index is out of bounds, exit + if (coord_c >= params.extent[kRank - 1]) { + return; + } + + int64_t idx_linear = blockIdx.y * blockDim.y + threadIdx.y; + + // Use modulo division to compute location + Coord outer_coord; + int64_t dst_byte_offset; + + compute_outer_coord_and_offset_( + params, + outer_coord, + dst_byte_offset, + idx_linear); + + /// 
Complete the reduction + while (idx_linear < params.outer_count) { + + int64_t src_byte_offset = idx_linear * params.workspace_outer_stride; + + ComputeFragment result = reduce_indices_( + params, + src_byte_ptr + src_byte_offset); + + // Convert to output type and store + NumericArrayConverter convert_output; + auto cvt = convert_output(result); + + *reinterpret_cast(dst_byte_ptr + dst_byte_offset) = + reinterpret_cast(cvt); + + // Update indices and pointers + idx_linear += gridDim.y * blockDim.y; + + compute_outer_coord_and_offset_( + params, + outer_coord, + dst_byte_offset, + idx_linear); + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace reduction +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/reduction/thread/reduce.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/reduction/thread/reduce.h new file mode 100644 index 0000000000000000000000000000000000000000..4f6e180acb790e3b29826582808cb1212a051bf0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/reduction/thread/reduce.h @@ -0,0 +1,234 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Defines basic thread level reduction with specializations for Array. 
+*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/numeric_types.h" +#include "cutlass/array.h" +#include "cutlass/half.h" +#include "cutlass/functional.h" + +namespace cutlass { +namespace reduction { +namespace thread { + +/// Structure to compute the thread level reduction +template +struct Reduce; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial Specialization of Reduce for "plus" (a functional operator) +template +struct Reduce< plus, T > { + + CUTLASS_HOST_DEVICE + T operator()(T lhs, T const &rhs) const { + plus _op; + return _op(lhs, rhs); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization of Reduce for Array +template +struct Reduce < plus, Array> { + + CUTLASS_HOST_DEVICE + Array operator()(Array const &in) const { + + Array result; + Reduce< plus, T > scalar_reduce; + result.clear(); + + CUTLASS_PRAGMA_UNROLL + for (auto i = 0; i < N; ++i) { + result[0] = scalar_reduce(result[0], in[i]); + } + + return result; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specializations of Reduce for Array +template +struct Reduce < plus, Array > { + + CUTLASS_HOST_DEVICE + Array operator()(Array const &input) { + + Array result; + + // If there is only 1 element - there is nothing to reduce + if( N ==1 ){ + + result[0] = input.front(); + + } else { + + #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600) + + __half result_d; + Array const *in_ptr_half = reinterpret_cast const *>(&input); + Array const *in_ptr_half2 = reinterpret_cast const *>(&input); + __half2 const *x_in_half2 = reinterpret_cast<__half2 const *>(in_ptr_half2); + + // Set initial result = first half2, in case N==2 + __half2 tmp_result = x_in_half2[0]; + + CUTLASS_PRAGMA_UNROLL + for (int i = 1; i < N/2; ++i) { + + tmp_result = __hadd2(x_in_half2[i], tmp_result); + + } + + result_d = __hadd(__low2half(tmp_result), __high2half(tmp_result)); + + // One final step is needed for odd "N" (to add the (N-1)th element) + if( N%2 ){ + + __half last_element; + Array tmp_last; + Array *tmp_last_ptr = &tmp_last; + tmp_last_ptr[0] = in_ptr_half[N-1]; + last_element = reinterpret_cast<__half const &>(tmp_last); + + result_d = __hadd(result_d, last_element); + + } + + Array *result_ptr = &result; + *result_ptr = reinterpret_cast &>(result_d); + + #else + + Reduce< plus, half_t > scalar_reduce; + result.clear(); + + CUTLASS_PRAGMA_UNROLL + for (auto i = 0; i < N; ++i) { + + result[0] = scalar_reduce(result[0], input[i]); + + } + + #endif + } + + return result; + + } +}; + + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specializations of Reduce for AlignedArray +template +struct Reduce < plus, AlignedArray > { + + CUTLASS_HOST_DEVICE + Array operator()(AlignedArray const &input) { + + Array result; + + // If there is only 1 element - there is nothing to reduce + if( N ==1 ){ + + result[0] = input.front(); + + } else { + + #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600) + + __half result_d; + AlignedArray const *in_ptr_half = reinterpret_cast const *>(&input); + AlignedArray const *in_ptr_half2 = reinterpret_cast const *>(&input); + __half2 const *x_in_half2 = reinterpret_cast<__half2 const *>(in_ptr_half2); + + // Set initial result = first half2, in case N==2 + __half2 tmp_result = x_in_half2[0]; + + CUTLASS_PRAGMA_UNROLL 
+ for (int i = 1; i < N/2; ++i) { + + tmp_result = __hadd2(x_in_half2[i], tmp_result); + + } + + result_d = __hadd(__low2half(tmp_result), __high2half(tmp_result)); + + // One final step is needed for odd "N" (to add the (N-1)th element) + if( N%2 ){ + + __half last_element; + AlignedArray tmp_last; + AlignedArray *tmp_last_ptr = &tmp_last; + tmp_last_ptr[0] = in_ptr_half[N-1]; + last_element = reinterpret_cast<__half const &>(tmp_last); + + result_d = __hadd(result_d, last_element); + + } + + Array *result_ptr = &result; + *result_ptr = reinterpret_cast &>(result_d); + + #else + + Reduce< plus, half_t > scalar_reduce; + result.clear(); + + CUTLASS_PRAGMA_UNROLL + for (auto i = 0; i < N; ++i) { + + result[0] = scalar_reduce(result[0], input[i]); + + } + + #endif + } + + return result; + + } +}; +} +} +} diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/reduction/thread/reduction_operators.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/reduction/thread/reduction_operators.h new file mode 100644 index 0000000000000000000000000000000000000000..d54bcc0c2fd6d517a1ed3c55b1a14e248ac41d9e --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/reduction/thread/reduction_operators.h @@ -0,0 +1,235 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
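The half_t specializations of Reduce above sum N halves by viewing the array as __half2 pairs, folding pairs with __hadd2, then adding the low and high halves, plus the trailing element when N is odd. A minimal CUDA device sketch of the same idea on a raw __half pointer (not the CUTLASS Array types; assumes the pointer is 4-byte aligned, which the AlignedArray specialization guarantees):

#include <cuda_fp16.h>

// Sum N __half values using paired __hadd2 adds; requires a device with native
// half arithmetic (the __CUDA_ARCH__ >= 600 branch above).
__device__ __half sum_half_array(__half const *in, int N) {
  if (N == 1) return in[0];

  __half2 const *in2 = reinterpret_cast<__half2 const *>(in);
  __half2 pair_sum = in2[0];
  for (int i = 1; i < N / 2; ++i) {
    pair_sum = __hadd2(pair_sum, in2[i]);       // two lanes accumulate in parallel
  }
  __half total = __hadd(__low2half(pair_sum), __high2half(pair_sum));
  if (N % 2) {
    total = __hadd(total, in[N - 1]);           // fold the odd trailing element
  }
  return total;
}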
\file + \brief Kernel performing a reduction over densely packed tensors in global memory +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/tensor_ref.h" +#include "cutlass/numeric_types.h" +#include "cutlass/array.h" +#include "cutlass/functional.h" +#include "cutlass/numeric_conversion.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace reduction { +namespace thread { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Mixed-precision reduction +template < + typename ElementAccumulator_, + typename Element_, + int Count = 1 +> +struct ReduceAdd { + + // + // Type definitions + // + + using ElementAccumulator = ElementAccumulator_; + using Element = Element_; + static int const kCount = Count; + + using FragmentAccumulator = cutlass::Array; + using FragmentElement = cutlass::Array; + + struct Params { }; + + // + // Data members + // + + /// Parameters object + Params params; + + // + // Methods + // + + /// Constructor + CUTLASS_HOST_DEVICE + ReduceAdd(Params params_ = Params()): params(params_) { } + + /// Operator + CUTLASS_HOST_DEVICE + FragmentAccumulator operator()( + FragmentAccumulator accumulator, + FragmentElement element) const { + + plus op; + + NumericArrayConverter< + ElementAccumulator, + Element, + kCount, + PreferredRoundingMode::kRound> converter; + + return op(accumulator, converter(element)); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace detail { + +/// Special handling for binary operators +template +struct VectorizeArrayOperation { + + using ValueType = Array; + + CUTLASS_HOST_DEVICE + ValueType operator()( + ReductionOp const &reduction_op, + ValueType const &lhs, + ValueType const &rhs) const { + + ValueType result; + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < N; ++i) { + result[i] = reduction_op(lhs[i], rhs[i]); + } + + return result; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +struct ReduceArrayOperation { + + using ArrayType = Array; + + CUTLASS_HOST_DEVICE + Element operator()( + ReductionOp const &reduction_op, + ArrayType const &array) const { + + Element item = reduction_op(array[0], array[1]); + + CUTLASS_PRAGMA_UNROLL + for (int i = 2; i < N; ++i) { + item = reduction_op(item, array[i]); + } + + return item; + } +}; + +template +struct ReduceArrayOperation, uint1b_t, N> { + + using ArrayType = Array; + + CUTLASS_HOST_DEVICE + uint1b_t operator()( + logical_and const &reduction_op, + ArrayType const &array) const { + + uint8_t const *ptr = reinterpret_cast(&array); + bool item = false; + + CUTLASS_PRAGMA_UNROLL + for (int byte = 0; byte < (N + 7) / 8; ++byte) { + uint8_t bits = ptr[byte]; + item = (item || !bits); + } + + return uint1b_t(!item); + } +}; + +template +struct ReduceArrayOperation, uint1b_t, N> { + + using ArrayType = Array; + + CUTLASS_HOST_DEVICE + uint1b_t operator()( + logical_and const &reduction_op, + ArrayType const &array) const { + + uint8_t const *ptr = reinterpret_cast(&array); + bool item = true; + + CUTLASS_PRAGMA_UNROLL + for (int byte = 0; byte < (N + 7) / 8; ++byte) { + uint8_t bits = ptr[byte]; + item = (item || bits); + } + + return uint1b_t(item); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Helper function to infer template argument types 
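ReduceArrayOperation is also specialized for arrays of uint1b_t, where N predicate bits are packed into bytes and the logical AND/OR reductions collapse to byte-wise tests. A straightforward sketch of reducing packed predicate bits; this shows the general idea only and is not a restatement of the exact CUTLASS semantics:

#include <cstdint>

// 'any' is true if any of the num_bits packed bits is set; 'all' is true if
// every one of them is set (trailing bits of the last byte are masked off).
bool packed_any(uint8_t const *bytes, int num_bits) {
  for (int b = 0; b < (num_bits + 7) / 8; ++b) {
    if (bytes[b]) return true;
  }
  return false;
}

bool packed_all(uint8_t const *bytes, int num_bits) {
  int full_bytes = num_bits / 8;
  for (int b = 0; b < full_bytes; ++b) {
    if (bytes[b] != 0xFF) return false;
  }
  int rem = num_bits % 8;
  if (rem == 0) return true;
  uint8_t mask = uint8_t((1u << rem) - 1);
  return (bytes[full_bytes] & mask) == mask;
}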
+template +CUTLASS_HOST_DEVICE +Array ApplyArrayOperator( + ReductionOp const &reduction_op, + Array const &lhs, + Array const &rhs) { + + VectorizeArrayOperation vectorize_op; + + return vectorize_op(reduction_op, lhs, rhs); +} + +/// Helper to reduce an array +template +Element ReduceArray(ReductionOp const &reduction_op, Array const &array) { + ReduceArrayOperation reduce_array_op; + + return reduce_array_op(reduction_op, array); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace detail + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace thread +} // namespace reduction +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/reduction/threadblock_swizzle.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/reduction/threadblock_swizzle.h new file mode 100644 index 0000000000000000000000000000000000000000..5dd6e4423ac4c3a6adfce66e95a3080e308d641e --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/reduction/threadblock_swizzle.h @@ -0,0 +1,67 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +**************************************************************************************************/ +/*! \file +\brief Defies functors for mapping blockIdx to partitions of the batched reduction computation. +*/ +#pragma once +#include "cutlass/coord.h" + +namespace cutlass { +namespace reduction { +struct DefaultBlockSwizzle { + /// Ctor + CUTLASS_HOST_DEVICE DefaultBlockSwizzle() {} + + /// Swizzle the block index. 
+ CUTLASS_DEVICE dim3 swizzle() { return blockIdx; } + + /// + CUTLASS_HOST_DEVICE dim3 get_grid_layout(Coord<3> const &problem_size, + Coord<3> const &OutputTile) { + assert(OutputTile[0] == 1 && OutputTile[1] == 1); + assert((problem_size[0] * problem_size[1] * problem_size[2]) % OutputTile[2] == 0); + dim3 grid; + grid.x = problem_size[0] * problem_size[1] * problem_size[2] + / OutputTile[2] ; + return grid; + } + + /// + CUTLASS_DEVICE Coord<3> get_threadblock_offset(Coord<3> const &SubTile) { + assert(SubTile[0] == 1 && SubTile[1] == 1); + dim3 block = swizzle(); + Coord<3> threadblock_offset = + make_Coord(0, 0, block.x * SubTile[2]); + return threadblock_offset; + } +}; +} // namespace reduction +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/thread/matrix.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/thread/matrix.h new file mode 100644 index 0000000000000000000000000000000000000000..4793fcbf273bf5012c7340da9753e4680a5c6a05 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/thread/matrix.h @@ -0,0 +1,198 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Defines a matrix object intended for storing data in registers and operations within + a CUDA thread. 
+*/ +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/array.h" +#include "cutlass/matrix_coord.h" + +namespace cutlass { +namespace thread { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Per-thread matrix object storing a packed matrix +template < + typename Element, + int Rows, + int Columns, + typename Layout = layout::RowMajor +> +class Matrix : public Array { +public: + + // Verify layout refers to a rank=2 matrix. + static_assert( + Layout::kRank == 2, + "Layout type must refer to a rank=2 matrix"); + + /// Base type + using Base = Array; + + /// Element type + using Element = Element_; + + /// Number of rows + static int const kRows = Rows; + + /// Number of columns + static int const kColumns = Columns; + + /// Layout within the array + using Layout = Layout_; + + /// Reference type to an element + using Reference = Element &; + + /// Logical rank of tensor index space + static int const kRank = 2; + + /// Index type + using Index = typename Layout::Index; + + /// Long index used for pointer offsets + using LongIndex = typename Layout::LongIndex; + + /// Coordinate in logical tensor space + using TensorCoord = typename Layout::TensorCoord; + + /// Stride type + using Stride = typename Layout::Stride; + + /// TensorRef to matrix object + using TensorRef = TensorRef; + + /// TensorRef to constant matrix object + using ConstTensorRef = typename TensorRef::ConstTensorRef; + + /// TensorRef to matrix object + using TensorView = TensorView; + + /// TensorRef to constant matrix object + using ConstTensorView = typename TensorView::ConstTensorView; + + /// Diagonal vector + using Diagonal = Vector; + +private: + + +public: + + // + // Methods + // + + /// Returns the size of the object + CUTLASS_HOST_DEVICE + static MatrixCoord extent() { + return make_Coord(kRows, kColumns); + } + + /// Returns the layout object + CUTLASS_HOST_DEVICE + static Layout layout() { + return Layout::packed(extent()); + } + + /// Ctor + CUTLASS_HOST_DEVICE + Matrix() { } + + /// Ctor + CUTLASS_HOST_DEVICE + Matrix(Diagonal const &diag) { + } + + /// Returns a TensorRef pointing to the first element of the tensor. + CUTLASS_HOST_DEVICE + TensorRef ref() { + return TensorRef(this->data(), layout()); + } + + /// Returns a TensorRef pointing to the first element of the tensor. + CUTLASS_HOST_DEVICE + ConstTensorRef const_ref() const { + return ConstTensorRef(this->data(), layout()); + } + + /// Returns a TensorRef pointing to the first element of the tensor. + CUTLASS_HOST_DEVICE + TensorView view() { + return TensorView(ref(), extent()); + } + + /// Returns a TensorView to const data + CUTLASS_HOST_DEVICE + ConstTensorView const_view() const { + return ConstTensorView(const_ref(), extent()); + } + + /// Returns a reference to the element at a given Coord + CUTLASS_HOST_DEVICE + Reference at(MatrixCoord const& coord) const { + typename Base::size_type offset_(layout().offset(coord)); + return Base::at(offset_); + } + + /// Returns the number of scalar elements needed to store tensor. 
+ CUTLASS_HOST_DEVICE + LongIndex capacity() const { + return LongIndex(Base::size()); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Column vector defined as a matrix with exactly one column +template < + typename Element, + int Rows, + typename Layout = layout::ColumnMajor +> +using ColumnVector = Matrix; + +/// Row vector defined as a matrix with exactly one row +template < + typename Element, + int Columns, + typename Layout = layout::RowMajor +> +using RowVector = Matrix; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace thread +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/collective/sm90_wgmma_transpose.hpp b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/collective/sm90_wgmma_transpose.hpp new file mode 100644 index 0000000000000000000000000000000000000000..6e41b7dbbb3df170dfce8d3f1ff55cabf0d7d95f --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/collective/sm90_wgmma_transpose.hpp @@ -0,0 +1,755 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Templates implementing how threads are mapped to a given tile. 
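+           In this header, the functors below transpose operand B tiles in shared memory so
+           that SM90 WGMMA math warp groups can consume them in the required layout.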
+*/ + +#pragma once + +#include "cute/arch/mma_sm90_gmma.hpp" +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace transform { +namespace collective { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace detail { +using namespace cute; + +template +constexpr auto +gmma_smem_transpose_or_passthrough() { + if constexpr (Transpose) { + if constexpr (cute::is_same_v, SmemLayoutAtom>) { + return GMMA::Layout_K_SW128_Atom{}; + } + else if constexpr (cute::is_same_v, SmemLayoutAtom>) { + return GMMA::Layout_K_SW64_Atom{}; + } + else if constexpr (cute::is_same_v, SmemLayoutAtom>) { + return GMMA::Layout_K_SW32_Atom{}; + } + else if constexpr (cute::is_same_v, SmemLayoutAtom>) { + return GMMA::Layout_K_INTER_Atom{}; + } + else { + static_assert(cutlass::detail::dependent_false, "Unsupported Layout_SW_Atom for B SMEM transposition"); + } + } + else { + return SmemLayoutAtom{}; + } +} + +template +constexpr auto +use_universal_transposition() { + if constexpr (sizeof(ElementType) == 1) { + return !cute::is_same_v, SmemCopyAtom>; + } + else if constexpr (sizeof(ElementType) == 4){ + // Only universal transposition can handle SW64 and Non swizzle SMEM layout + if constexpr (cute::is_same_v, SmemCopyAtom> || + cute::is_same_v, SmemCopyAtom>) { + return true; + } + else { + return false; + } + } + else { + static_assert(cutlass::detail::dependent_false, "Unsupported ElementType for B SMEM transposition"); + } +} + +template< + class TiledMma_, + class SmemLayoutB_, + class SmemLayoutAtomB_, + class ElementB_> +class NoTranspositionOperandB { +public: + using TiledMma = TiledMma_; + using SmemLayoutB = SmemLayoutB_; + using SmemLayoutAtomB = SmemLayoutAtomB_; + using ElementB = ElementB_; + + constexpr CUTLASS_HOST_DEVICE + NoTranspositionOperandB( + int, + int, + TiledMma, + SmemLayoutB, + SmemLayoutAtomB, + ElementB) { } + + template < + class TensorSmemB, + class TensorTransposedSmemB> + CUTLASS_DEVICE void operator()( + TensorSmemB const&, + TensorTransposedSmemB const&, + int, int) { } + + CUTLASS_DEVICE void synchronize(int) { } + + CUTLASS_DEVICE void synchronize() { } + + template < + class TensorSmemB, + class TensorTransposedSmemB> + CUTLASS_DEVICE void transpose( + TensorSmemB const&, + TensorTransposedSmemB const&, + int) { } +}; + +template< + class TiledMma_, + class SmemLayoutB_, + class SmemLayoutAtomB_, + class ElementB_> +class UniversalTranspositionOperandB { +public: + using TiledMma = TiledMma_; + using SmemLayoutB = SmemLayoutB_; + using SmemLayoutAtomB = SmemLayoutAtomB_; + using ElementB = ElementB_; + + constexpr CUTLASS_HOST_DEVICE + UniversalTranspositionOperandB( + int warp_idx_, + int warp_group_thread_idx_, + TiledMma, + SmemLayoutB, + SmemLayoutAtomB, + ElementB) + : warp_idx(warp_idx_) + , warp_group_thread_idx(warp_group_thread_idx_) { } + + template < + class TensorSmemB, + class TensorTransposedSmemB> + CUTLASS_DEVICE void operator()( + TensorSmemB const& sB, + TensorTransposedSmemB const& gmma_sB, + int read_stage, int current_step) { + if (current_step > 0) { + return; + } + + constexpr int NumMathWarpGroup = size(TiledMma{}) / NumThreadsPerWarpGroup; + static_assert(NumMathWarpGroup == 1 || + (!detail::use_universal_transposition() && NumMathWarpGroup == 2), + "Wrong math warp group number for TransposeB"); + constexpr int WarpgroupTileSize = size<1>(SmemLayoutB{}); // A warp group tile would process entire Smem K. 
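+    // One SMEM swizzle unit spans 16 bytes along the contiguous dimension, so the
+    // WarpThreadShapeN threads defined below (one element per thread) together cover
+    // exactly one swizzle unit per access.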
+ + constexpr int BytesPerSmemSwizzleUnit = 16; + constexpr int WarpThreadShapeN = BytesPerSmemSwizzleUnit / sizeof(ElementB); + ////////////////////////////////////////////////////////////////////////////////////////////////////////////// + /// Universal transposition, need warp_group sync between load and store. + /// The number of reg used depends on the input elementB. + ////////////////////////////////////////////////////////////////////////////////////////////////////////////// + /* + In one copy step, a warp group would load WarpgroupTileSize * WarpgroupTileSize tile then store to transposed location. + In warp_group_tile, each warp holds Four WarpTileSize x WarpTileSize elements: + K + ------------ + | W0 W1 W2 W3 --- + | W0 W1 W2 W3 | + | W0 W1 W2 W3 | --> Copy Step 0 + | W0 W1 W2 W3 --- + .... + | W0 W1 W2 W3 --- + | W0 W1 W2 W3 | + | W0 W1 W2 W3 | --> Copy Step n + | W0 W1 W2 W3 --- + */ + static_assert((NumThreadsPerWarpGroup % WarpThreadShapeN == 0), "Unsupported warp thread layout."); + constexpr auto WarpgroupThreadLayout = make_layout(make_shape(Int{}, Int{})); + + // Get copy tile and partition to each thread + auto sB_tiled_copy = make_tiled_copy( + Copy_Atom{}, + WarpgroupThreadLayout, // thr_layout + Layout<_1>{} // val_layout + ); + static_assert(size(sB_tiled_copy) == size(TiledMma{}), "Wrong thread number in TiledCopy."); + + auto sB_thr_copy = sB_tiled_copy.get_thread_slice(warp_group_thread_idx); + Tensor tCsB = sB_thr_copy.partition_S( sB(_,_,read_stage)); // (CPY, CPY_N, CPY_K) + Tensor tCsB_transposed = sB_thr_copy.partition_D(gmma_sB(_,_,read_stage)); // (CPY, CPY_N, CPY_K) + + // Divide partitioned tile to limit register usage + constexpr int CopySteps = size<0>(SmemLayoutB{}) / WarpgroupTileSize; + constexpr auto CopyTileShape = make_shape(size<0>(tCsB), Int< size<1>(tCsB) / CopySteps >{}, size<2>(tCsB)); + static_assert(size<1>(tCsB) % CopySteps == 0, "CopySteps must evenly divide rank 1 size of partitioned SMEM."); + + Tensor tCsB_copy_tile = zipped_divide(tCsB, CopyTileShape); + Tensor tCsB_copy_tile_transposed = zipped_divide(tCsB_transposed, CopyTileShape); + auto transpose_fragment = make_fragment_like(tCsB_copy_tile(_,_0{})); + + CUTLASS_PRAGMA_NO_UNROLL + for (int step = 0; step < CopySteps; ++step) { + copy(sB_tiled_copy, tCsB_copy_tile(_,step), transpose_fragment); + + // Make sure all elements are read before being overwritten + __syncthreads(); + + copy(sB_tiled_copy, transpose_fragment, tCsB_copy_tile_transposed(_,step)); + } + } + + CUTLASS_DEVICE void synchronize(int step) { + if (step == 0) { + // SMEM fence to make sure B is transposed before math + cutlass::arch::fence_view_async_shared(); + cutlass::arch::NamedBarrier::sync(size(TiledMma{}), 1); + } + } + + CUTLASS_DEVICE void synchronize() { + // SMEM fence to make sure B is transposed before math + cutlass::arch::fence_view_async_shared(); + cutlass::arch::NamedBarrier::sync(size(TiledMma{}), 1); + } + + template < + class TensorSmemB, + class TensorTransposedSmemB> + CUTLASS_DEVICE void transpose( + TensorSmemB const& sB, + TensorTransposedSmemB const& gmma_sB, + int read_stage) { + + this->operator()(sB, gmma_sB, read_stage, 0); + synchronize(); + + } + +private: + const int warp_idx; + const int warp_group_thread_idx; +}; + +template< + class TiledMma_, + class SmemLayoutB_, + class SmemLayoutAtomB_, + class ElementB_> +class AsyncTranspositionOperandB { +public: + + using TiledMma = TiledMma_; + using SmemLayoutB = SmemLayoutB_; + using SmemLayoutAtomB = SmemLayoutAtomB_; + using 
ElementB = ElementB_; + + static constexpr int Steps = 2; + static constexpr int NumMathWarpGroup = size(TiledMma{}) / NumThreadsPerWarpGroup; + static constexpr int StepsPerWarpGroup = Steps / NumMathWarpGroup; + static_assert(NumMathWarpGroup <= 2, + "Wrong math warp group number for TransposeB"); + static constexpr int WarpgroupTileSize = size<1>(SmemLayoutB{}); // A warp group tile would process entire Smem K. + static constexpr int NumWarpsPerWarpGroup = NumThreadsPerWarpGroup / NumThreadsPerWarp; + + static constexpr int BytesPerSmemSwizzleUnit = 16; + static constexpr int WarpThreadShapeN = BytesPerSmemSwizzleUnit / sizeof(ElementB); + static constexpr int WarpThreadShapeK = NumThreadsPerWarp / WarpThreadShapeN; + static constexpr int NumWarpTilePerWarpgroupTile = NumWarpsPerWarpGroup * (Steps == 8 ? 2 : 1); + + static constexpr int WarpTileSize = WarpgroupTileSize / NumWarpTilePerWarpgroupTile; + static_assert(WarpTileSize >= WarpThreadShapeN && WarpTileSize >= WarpThreadShapeK, "Invaild warp thread shape." ); + static constexpr int TilesPerWarp = 2; // Each Warp would process 2 warp_tiles in one step. + static constexpr int64_t WarpTileNCoordLUT = 06723763275316420; + static constexpr int64_t WarpTileKCoordLUT = 05410541064206420; + static constexpr int NumStepsEncoded = 4; // Only encoding first 4 steps into LUT. + static constexpr int MaskPerStep = 07; // Each step is encoded into 3bits, + static constexpr int NumBitsPerStep = 3; + static constexpr int MaskPerWarp = 07777; // Each warp has 4 steps(12 bits) + static constexpr int NumBitsPerWarp = 12; + // Number of warp_group_tiles + static_assert(size<0>(SmemLayoutB{}) % WarpgroupTileSize == 0, + "Copy size must evenly divide SMEM tile."); + static constexpr int WarpgroupTileNum = size<0>(SmemLayoutB{}) / WarpgroupTileSize; + + static_assert(size<2>(typename TiledMma::AtomShape_MNK{}) <= WarpThreadShapeK, + "Need to be able to transpose first k-block in the first step"); + + constexpr CUTLASS_HOST_DEVICE + AsyncTranspositionOperandB( + int warp_idx_, + int warp_group_thread_idx_, + TiledMma, + SmemLayoutB, + SmemLayoutAtomB, + ElementB) + : warp_idx(warp_idx_) + , warp_group_thread_idx(warp_group_thread_idx_) + , warp_idx_in_warp_group(warp_idx_ % NumWarpsPerWarpGroup) + , current_warp_tile_n_coord_LUT((WarpTileNCoordLUT >> ((warp_idx_ + % NumWarpsPerWarpGroup) * NumBitsPerWarp)) & MaskPerWarp) + , current_warp_tile_k_coord_LUT((WarpTileKCoordLUT >> ((warp_idx_ + % NumWarpsPerWarpGroup) * NumBitsPerWarp)) & MaskPerWarp) { } + + template < + class TensorSmemB, + class TensorTransposedSmemB> + CUTLASS_DEVICE void operator()( + TensorSmemB const& sB, + TensorTransposedSmemB const& gmma_sB, + int read_stage, int current_step) + { + if (current_step >= StepsPerWarpGroup) { + return; + } + + static constexpr auto WarpThreadLayout = make_layout(make_shape(Int{}, Int{})); + ////////////////////////////////////////////////////////////////////////////////////////////////////////////// + /// A warp group uses 2 steps to transpose the whole WarpgroupTileSize x WarpgroupTileSize. + /// In each step, one warp would hold two warp_tiles. + /// Step 0: Step 1: + /// W0 W1 W2 W3 -- -- -- -- + /// W1 W0 -- -- -- -- W3 W2 + /// W2 -- -- -- -- W3 W0 W1 + /// W3 -- -- -- -- W2 W1 W0 + /// + ///////////////////////////////////////////////////////////////////////////////////////////////////////////// + /// + /// Fully static coord LUT to avoid extra register use. 
+ /// [warp_id][step][warp_tile][n / k] + /// Step 0 Step 1 Step 2 Step 3 Step 4 Step 5 Step 6 Step 7 + /// {{{0,0}, {1,1}}, {{2,2}, {3,3}}, {{4,4}, {5,5}}, {{6,6}, {7,7}}, {{4,0}, {0,4}}, {{4,1}, {1,4}}, {{4,2}, {2,4}}, {{4,3}, {3,4}}}, // W0 + /// {{{1,0}, {0,1}}, {{3,2}, {2,3}}, {{5,4}, {4,5}}, {{7,6}, {6,7}}, {{5,0}, {0,5}}, {{5,1}, {1,5}}, {{5,2}, {2,5}}, {{5,3}, {3,5}}}, // W1 + /// {{{2,0}, {0,2}}, {{3,1}, {1,3}}, {{6,4}, {4,6}}, {{7,5}, {5,7}}, {{6,0}, {0,6}}, {{6,1}, {1,6}}, {{6,2}, {2,6}}, {{6,3}, {3,6}}}, // W2 + /// {{{3,0}, {0,3}}, {{2,1}, {1,2}}, {{7,4}, {4,7}}, {{6,5}, {5,6}}, {{7,0}, {0,7}}, {{7,1}, {1,7}}, {{7,2}, {2,7}}, {{7,3}, {3,7}}}, // W3 + /// + /// Encoding the coord of warp tile0 into two int64_t values. + /// Only encoding Step 0 ~ Step 4, since Step 5 ~ Step 7 have a straightforward pattern. + /// Only encoding warp tile0, since the coords of warp tile1 could be easily deduced from warp tile0. + /// The 2-step transposition and the 8-step transposition share the same encoding. + /// + ////////////////////////////////////////////////////////////////////////////////////////////////////////////// + + // Divide entire SMEM to multiple warp_tiles + constexpr auto WarpTileShape = make_shape(Int(), Int()); + Tensor s_tile = zipped_divide( sB(_,_,read_stage), WarpTileShape); + Tensor s_tile_transposed = zipped_divide(gmma_sB(_,_,read_stage), WarpTileShape); + + // Get copy tile + auto sB_tiled_copy = make_tiled_copy( + Copy_Atom{}, + WarpThreadLayout, // thr_layout + Layout<_1>{} // val_layout + ); + + static_assert(size(sB_tiled_copy) * NumWarpsPerWarpGroup == size(TiledMma{}) / NumMathWarpGroup, "Wrong thread number in TiledCopy."); + auto sB_thr_copy = sB_tiled_copy.get_thread_slice(warp_group_thread_idx % NumThreadsPerWarp); // slice based on lane_idx + + // Construct fragments for transposition + Tensor tmp_tCsB = sB_thr_copy.partition_S(flatten(s_tile(_, make_coord(_0{}, _0{})))); + decltype(make_fragment_like(tmp_tCsB)) transpose_fragments[TilesPerWarp] = { + make_fragment_like(tmp_tCsB), + make_fragment_like(tmp_tCsB) + }; + + [[maybe_unused]] int step = current_step * NumMathWarpGroup; + if constexpr (NumMathWarpGroup == 2) { + // For 2 math warpgroup, warp idx4~7 is 1st warp group and 8~9 is 2nd, so decide if 2nd warpgroup need warp idx divide 8. + step += warp_idx / (NumWarpsPerWarpGroup * 2); + } + + int tmp_warp_tile_n_coord_LUT = current_warp_tile_n_coord_LUT >> (NumBitsPerStep * current_step); + int tmp_warp_tile_k_coord_LUT = current_warp_tile_k_coord_LUT >> (NumBitsPerStep * current_step); + + if constexpr (NumMathWarpGroup == 2) { + tmp_warp_tile_n_coord_LUT >>= NumBitsPerStep * (warp_idx / (NumWarpsPerWarpGroup * 2)); + tmp_warp_tile_k_coord_LUT >>= NumBitsPerStep * (warp_idx / (NumWarpsPerWarpGroup * 2)); + } + + // decoding the warp tile coord. + int warp_tile0_n, warp_tile0_k; + if constexpr (StepsPerWarpGroup <= NumStepsEncoded) { + warp_tile0_n = tmp_warp_tile_n_coord_LUT & MaskPerStep; + warp_tile0_k = tmp_warp_tile_k_coord_LUT & MaskPerStep; + } else { + warp_tile0_n = step < NumStepsEncoded ? (tmp_warp_tile_n_coord_LUT & MaskPerStep) : 4 + warp_idx_in_warp_group; + warp_tile0_k = step < NumStepsEncoded ? (tmp_warp_tile_k_coord_LUT & MaskPerStep) : step - 4; + } + + int warp_tile1_n = warp_tile0_n == warp_tile0_k ? warp_tile0_n + 1 : warp_tile0_k; + int warp_tile1_k = warp_tile0_n == warp_tile0_k ? 
warp_tile0_k + 1 : warp_tile0_n; + + CUTLASS_PRAGMA_UNROLL + for (int warp_group_tile = 0; warp_group_tile < WarpgroupTileNum; ++warp_group_tile) { + + static_assert(TilesPerWarp == 2); + + // [warp_tile][n/k] + const int warp_tile_coord[TilesPerWarp][2] = { + // n k + {warp_group_tile * NumWarpTilePerWarpgroupTile + warp_tile0_n, warp_tile0_k}, // warp_tile 0 + {warp_group_tile * NumWarpTilePerWarpgroupTile + warp_tile1_n, warp_tile1_k} // warp_tile 1 + }; + + CUTLASS_PRAGMA_UNROLL + for (int warp_tile = 0; warp_tile < TilesPerWarp; ++warp_tile) { + Tensor tCsB = sB_thr_copy.partition_S( + flatten(s_tile(_, make_coord(warp_tile_coord[warp_tile][0], warp_tile_coord[warp_tile][1]))) + ); // (CPY, CPY_N, CPY_K) + + copy(sB_tiled_copy, tCsB, transpose_fragments[warp_tile]); + } + + // Make sure elements in two 8x8 warp tiles are all consumed + __syncwarp(); + + CUTLASS_PRAGMA_UNROLL + for (int warp_tile = 0; warp_tile < TilesPerWarp; ++warp_tile) { + Tensor tCsB_transposed = sB_thr_copy.partition_D( + flatten(s_tile_transposed(_, make_coord(warp_tile_coord[warp_tile][0], warp_tile_coord[warp_tile][1]))) + ); // (CPY, CPY_N, CPY_K) + copy(sB_tiled_copy, transpose_fragments[warp_tile], tCsB_transposed); + } + + } // loop warp_group_tile + } + + CUTLASS_DEVICE void synchronize(int step) { + if (step < StepsPerWarpGroup) { + // SMEM fence to make sure B is transposed before math + cutlass::arch::fence_view_async_shared(); + cutlass::arch::NamedBarrier::sync(size(TiledMma{}), 1); + } + } + + CUTLASS_DEVICE void synchronize() { + cutlass::arch::fence_view_async_shared(); + cutlass::arch::NamedBarrier::sync(size(TiledMma{}), 1); + } + + template < + class TensorSmemB, + class TensorTransposedSmemB> + CUTLASS_DEVICE void transpose( + TensorSmemB const& sB, + TensorTransposedSmemB const& gmma_sB, + int read_stage) { + + CUTLASS_PRAGMA_UNROLL + for(int i = 0; i < StepsPerWarpGroup; ++i) { + this->operator()(sB, gmma_sB, read_stage, i); + } + synchronize(); + + } +private: + const int warp_idx; + const int warp_group_thread_idx; + const int warp_idx_in_warp_group; + const int current_warp_tile_n_coord_LUT; + const int current_warp_tile_k_coord_LUT; +}; + +template< + class TiledMma_, + class SmemLayoutB_, + class SmemLayoutAtomB_, + class ElementB_> +class AsyncTranspositionOperandB_1BElementB { +public: + + static_assert(sizeof(ElementB_) == 1); + + using TiledMma = TiledMma_; + using SmemLayoutB = SmemLayoutB_; + using SmemLayoutAtomB = SmemLayoutAtomB_; + using ElementB = ElementB_; + + static constexpr int Steps = 8; + static constexpr int NumMathWarpGroup = size(TiledMma{}) / NumThreadsPerWarpGroup; + static constexpr int StepsPerWarpGroup = Steps / NumMathWarpGroup; + static_assert(NumMathWarpGroup <= 2, + "Wrong math warp group number for TransposeB"); + static constexpr int WarpgroupTileSize = size<1>(SmemLayoutB{}); // A warp group tile would process entire Smem K. + static constexpr int NumWarpsPerWarpGroup = NumThreadsPerWarpGroup / NumThreadsPerWarp; + + static constexpr int BytesPerSmemSwizzleUnit = 16; + static constexpr int WarpThreadShapeN = BytesPerSmemSwizzleUnit / sizeof(ElementB); + static constexpr int WarpThreadShapeK = NumThreadsPerWarp / WarpThreadShapeN; + static constexpr int NumWarpTilePerWarpgroupTile = NumWarpsPerWarpGroup * (Steps == 8 ? 2 : 1); + + static constexpr int WarpTileSize = WarpgroupTileSize / NumWarpTilePerWarpgroupTile; + static_assert(WarpTileSize >= WarpThreadShapeN && WarpTileSize >= WarpThreadShapeK, "Invaild warp thread shape." 
); + static constexpr int TilesPerWarp = 2; // Each Warp would process 2 warp_tiles in one step. + static constexpr int64_t WarpTileNCoordLUT = 06723763275316420; + static constexpr int64_t WarpTileKCoordLUT = 05410541064206420; + static constexpr int NumStepsEncoded = 4; // Only encoding first 4 steps into LUT. + static constexpr int MaskPerStep = 07; // Each step is encoded into 3bits, + static constexpr int NumBitsPerStep = 3; + static constexpr int MaskPerWarp = 07777; // Each warp has 4 steps(12 bits) + static constexpr int NumBitsPerWarp = 12; + // Number of warp_group_tiles + static_assert(size<0>(SmemLayoutB{}) % WarpgroupTileSize == 0, + "Copy size must evenly divide SMEM tile."); + static constexpr int WarpgroupTileNum = size<0>(SmemLayoutB{}) / WarpgroupTileSize; + + + constexpr CUTLASS_HOST_DEVICE + AsyncTranspositionOperandB_1BElementB( + int warp_idx_, + int warp_group_thread_idx_, + TiledMma, + SmemLayoutB, + SmemLayoutAtomB, + ElementB) + : warp_idx(warp_idx_) + , warp_group_thread_idx(warp_group_thread_idx_) + , warp_idx_in_warp_group(warp_idx_ % NumWarpsPerWarpGroup) + , current_warp_tile_n_coord_LUT((WarpTileNCoordLUT >> ((warp_idx_ + % NumWarpsPerWarpGroup) * NumBitsPerWarp)) & MaskPerWarp) + , current_warp_tile_k_coord_LUT((WarpTileKCoordLUT >> ((warp_idx_ + % NumWarpsPerWarpGroup) * NumBitsPerWarp)) & MaskPerWarp) { } + + template < + class TensorSmemB, + class TensorTransposedSmemB> + CUTLASS_DEVICE void operator()( + TensorSmemB const& sB, + TensorTransposedSmemB const& gmma_sB, + int read_stage, int current_step) + { + if (current_step > 0) { + return; + } + + constexpr auto WarpThreadLayout = make_layout(make_shape(Int{}, Int{})); + ////////////////////////////////////////////////////////////////////////////////////////////////////////////// + /// A warp group uses 8 steps to transpose the whole WarpgroupTileSize x WarpgroupTileSize. + /// Divide a warp_group_tile into 8x8 warp_tiles to futher reduce the reg usage. 
+ /// Step 0: Step 1: Step 2: Step 3: + /// W0 W1 W2 W3 -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- + /// W1 W0 -- -- -- -- -- -- -- -- W3 W2 -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- + /// W2 -- -- -- -- -- -- -- -- W3 W0 W1 -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- + /// W3 -- -- -- -- -- -- -- -- W2 W1 W0 -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- + /// -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- W0 W1 W2 W3 -- -- -- -- -- -- -- -- + /// -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- W1 W0 -- -- -- -- -- -- -- -- W3 W2 + /// -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- W2 -- -- -- -- -- -- -- -- W3 W0 W1 + /// -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- W3 -- -- -- -- -- -- -- -- W2 W1 W0 + /// + /// Step 4: Step 5: Step 6: Step 7: + /// -- -- -- -- W0 W1 W2 W3 -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- + /// -- -- -- -- -- -- -- -- -- -- -- -- W0 W1 W2 W3 -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- + /// -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- W0 W1 W2 W3 -- -- -- -- -- -- -- -- + /// -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- W0 W1 W2 W3 + /// W0 -- -- -- -- -- -- -- -- W0 -- -- -- -- -- -- -- -- W0 -- -- -- -- -- -- -- -- W0 -- -- -- -- + /// W1 -- -- -- -- -- -- -- -- W1 -- -- -- -- -- -- -- -- W1 -- -- -- -- -- -- -- -- W1 -- -- -- -- + /// W2 -- -- -- -- -- -- -- -- W2 -- -- -- -- -- -- -- -- W2 -- -- -- -- -- -- -- -- W2 -- -- -- -- + /// W3 -- -- -- -- -- -- -- -- W3 -- -- -- -- -- -- -- -- W3 -- -- -- -- -- -- -- -- W3 -- -- -- -- + /// + ///////////////////////////////////////////////////////////////////////////////////////////////////////////// + /// + /// Fully static coord LUT to avoid extra register use. + /// [warp_id][step][warp_tile][n / k] + /// Step 0 Step 1 Step 2 Step 3 Step 4 Step 5 Step 6 Step 7 + /// {{{0,0}, {1,1}}, {{2,2}, {3,3}}, {{4,4}, {5,5}}, {{6,6}, {7,7}}, {{4,0}, {0,4}}, {{4,1}, {1,4}}, {{4,2}, {2,4}}, {{4,3}, {3,4}}}, // W0 + /// {{{1,0}, {0,1}}, {{3,2}, {2,3}}, {{5,4}, {4,5}}, {{7,6}, {6,7}}, {{5,0}, {0,5}}, {{5,1}, {1,5}}, {{5,2}, {2,5}}, {{5,3}, {3,5}}}, // W1 + /// {{{2,0}, {0,2}}, {{3,1}, {1,3}}, {{6,4}, {4,6}}, {{7,5}, {5,7}}, {{6,0}, {0,6}}, {{6,1}, {1,6}}, {{6,2}, {2,6}}, {{6,3}, {3,6}}}, // W2 + /// {{{3,0}, {0,3}}, {{2,1}, {1,2}}, {{7,4}, {4,7}}, {{6,5}, {5,6}}, {{7,0}, {0,7}}, {{7,1}, {1,7}}, {{7,2}, {2,7}}, {{7,3}, {3,7}}}, // W3 + /// + /// Encoding the coord of warp tile0 into two int64_t values. + /// Only encoding Step 0 ~ Step 4, since Step 5 ~ Step 7 have a straightforward pattern. + /// Only encoding warp tile0, since the coords of warp tile1 could be easily deduced from warp tile0. + /// The 2-step transposition and the 8-step transposition share the same encoding. 
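+    /// Worked example read off the table above: warp W1 at Step 0 decodes warp_tile0 = {n=1, k=0};
+    /// since n != k, warp_tile1 is the mirrored coordinate {n=0, k=1}. On the diagonal (W0 at
+    /// Step 0, {0,0}), the second tile is instead the next diagonal entry {1,1}.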
+ /// + ////////////////////////////////////////////////////////////////////////////////////////////////////////////// + + // Divide entire SMEM to multiple warp_tiles + constexpr auto WarpTileShape = make_shape(Int(), Int()); + Tensor s_tile = zipped_divide( sB(_,_,read_stage), WarpTileShape); + Tensor s_tile_transposed = zipped_divide(gmma_sB(_,_,read_stage), WarpTileShape); + + // Get copy tile + auto sB_tiled_copy = make_tiled_copy( + Copy_Atom{}, + WarpThreadLayout, // thr_layout + Layout<_1>{} // val_layout + ); + static_assert(size(sB_tiled_copy) * NumWarpsPerWarpGroup == size(TiledMma{}) / NumMathWarpGroup, "Wrong thread number in TiledCopy."); + auto sB_thr_copy = sB_tiled_copy.get_thread_slice(warp_group_thread_idx % NumThreadsPerWarp); // slice based on lane_idx + + // Construct fragments for transposition + Tensor tmp_tCsB = sB_thr_copy.partition_S(flatten(s_tile(_, make_coord(_0{}, _0{})))); + decltype(make_fragment_like(tmp_tCsB)) transpose_fragments[TilesPerWarp] = { + make_fragment_like(tmp_tCsB), + make_fragment_like(tmp_tCsB) + }; + + CUTLASS_PRAGMA_NO_UNROLL + for (int warp_group_tile = 0; warp_group_tile < WarpgroupTileNum; ++warp_group_tile) { + int tmp_warp_tile_n_coord_LUT = current_warp_tile_n_coord_LUT; + int tmp_warp_tile_k_coord_LUT = current_warp_tile_k_coord_LUT; + constexpr int StepsPerWarpGroup = Steps / NumMathWarpGroup; + + if constexpr (NumMathWarpGroup == 2) { + tmp_warp_tile_n_coord_LUT >>= NumBitsPerStep * (warp_idx / (NumWarpsPerWarpGroup * 2)); + tmp_warp_tile_k_coord_LUT >>= NumBitsPerStep * (warp_idx / (NumWarpsPerWarpGroup * 2)); + } + + CUTLASS_PRAGMA_NO_UNROLL + for (int step_per_warp_group = 0; step_per_warp_group < StepsPerWarpGroup; ++step_per_warp_group) { + // For 2 math warpgroup, warp idx4~7 is 1st warp group and 8~9 is 2nd, so decide if 2nd warpgroup need warp idx divide 8. + int step = step_per_warp_group * NumMathWarpGroup + warp_idx / (NumWarpsPerWarpGroup * 2); + // decoding the warp tile coord. + int warp_tile0_n = step < NumStepsEncoded ? (tmp_warp_tile_n_coord_LUT & MaskPerStep) : 4 + warp_idx_in_warp_group; + int warp_tile0_k = step < NumStepsEncoded ? (tmp_warp_tile_k_coord_LUT & MaskPerStep) : step - 4; + int warp_tile1_n = warp_tile0_n == warp_tile0_k ? warp_tile0_n + 1 : warp_tile0_k; + int warp_tile1_k = warp_tile0_n == warp_tile0_k ? 
warp_tile0_k + 1 : warp_tile0_n; + + tmp_warp_tile_n_coord_LUT >>= NumBitsPerStep; + tmp_warp_tile_k_coord_LUT >>= NumBitsPerStep; + + static_assert(TilesPerWarp == 2); + + // [warp_tile][n/k] + const int warp_tile_coord[TilesPerWarp][2] = { + // n k + {warp_group_tile * NumWarpTilePerWarpgroupTile + warp_tile0_n, warp_tile0_k}, // warp_tile 0 + {warp_group_tile * NumWarpTilePerWarpgroupTile + warp_tile1_n, warp_tile1_k} // warp_tile 1 + }; + + CUTLASS_PRAGMA_UNROLL + for (int warp_tile = 0; warp_tile < TilesPerWarp; ++warp_tile) { + Tensor tCsB = sB_thr_copy.partition_S( + flatten(s_tile(_, make_coord(warp_tile_coord[warp_tile][0], warp_tile_coord[warp_tile][1]))) + ); // (CPY, CPY_N, CPY_K) + + copy(sB_tiled_copy, tCsB, transpose_fragments[warp_tile]); + } + + // Make sure elements in two 8x8 warp tiles are all consumed + __syncwarp(); + + CUTLASS_PRAGMA_UNROLL + for (int warp_tile = 0; warp_tile < TilesPerWarp; ++warp_tile) { + Tensor tCsB_transposed = sB_thr_copy.partition_D( + flatten(s_tile_transposed(_, make_coord(warp_tile_coord[warp_tile][0], warp_tile_coord[warp_tile][1]))) + ); // (CPY, CPY_N, CPY_K) + copy(sB_tiled_copy, transpose_fragments[warp_tile], tCsB_transposed); + } + } // lock step + } // loop warp_group_tile + } + + CUTLASS_DEVICE void synchronize(int step) { + if (step == 0) { + // SMEM fence to make sure B is transposed before math + cutlass::arch::fence_view_async_shared(); + cutlass::arch::NamedBarrier::sync(size(TiledMma{}), 1); + } + } + + CUTLASS_DEVICE void synchronize() { + cutlass::arch::fence_view_async_shared(); + cutlass::arch::NamedBarrier::sync(size(TiledMma{}), 1); + } + + template < + class TensorSmemB, + class TensorTransposedSmemB> + CUTLASS_DEVICE void transpose( + TensorSmemB const& sB, + TensorTransposedSmemB const& gmma_sB, + int read_stage) { + this->operator()(sB, gmma_sB, read_stage, 0); + synchronize(); + } + +private: + const int warp_idx; + const int warp_group_thread_idx; + const int warp_idx_in_warp_group; + const int current_warp_tile_n_coord_LUT; + const int current_warp_tile_k_coord_LUT; +}; + + +template< + class TiledMma, + class SmemLayoutB, + class SmemLayoutAtomB, + class ElementB, + bool TransposeB +> +constexpr CUTLASS_HOST_DEVICE +auto +make_transpose_operand_b( + int warp_idx, + int warp_group_thread_idx, + TiledMma, + SmemLayoutB, + SmemLayoutAtomB, + ElementB, + cute::bool_constant) +{ + if constexpr (!TransposeB) { + return NoTranspositionOperandB( + warp_idx, warp_group_thread_idx, TiledMma{}, + SmemLayoutB{}, SmemLayoutAtomB{}, ElementB{}); + } + else if constexpr (use_universal_transposition()) { + return UniversalTranspositionOperandB( + warp_idx, warp_group_thread_idx, TiledMma{}, + SmemLayoutB{}, SmemLayoutAtomB{}, ElementB{}); + } + else if constexpr (sizeof(ElementB) == 1) { + return AsyncTranspositionOperandB_1BElementB( + warp_idx, warp_group_thread_idx, TiledMma{}, + SmemLayoutB{}, SmemLayoutAtomB{}, ElementB{}); + } + else { + return AsyncTranspositionOperandB( + warp_idx, warp_group_thread_idx, TiledMma{}, + SmemLayoutB{}, SmemLayoutAtomB{}, ElementB{}); + } +} + +}; // namespace detail + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace collective +} // namespace transform +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/pitch_linear_thread_map.h 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/pitch_linear_thread_map.h new file mode 100644 index 0000000000000000000000000000000000000000..e099cebf3d6ea6d95a0feabadd122399663c1e16 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/pitch_linear_thread_map.h @@ -0,0 +1,926 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Templates implementing how threads are mapped to a given tile. + +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/array.h" +#include "cutlass/coord.h" +#include "cutlass/predicate_vector.h" +#include "cutlass/tensor_ref.h" +#include "cutlass/tensor_view.h" +#include "cutlass/layout/pitch_linear.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace transform { + +//////////////////////////////////////////////////////////////////////////////// + +/// Strip-mines a pitch-linear tile among a given number of threads, first along +/// the contiguous dimension then along the strided dimension. +/// +/// The tile must be divisible by the thread count such that all threads may +/// execute the same number of iterations with the same delta to exhaustively +/// cover the tile. +/// +/// This class satisfies the "RegularThreadMapping" concept. +/// +/// This ThreadMap is used by SIMT kernels and operand E of the sparse tensor +/// kernels. 
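+///
+/// Illustrative example: with Shape = PitchLinearShape<64, 4>, Threads = 32 and
+/// ElementsPerAccess = 1, each thread starts at (thread_id, 0) and performs
+/// Iterations = <2, 4> accesses separated by Delta = <32, 1>.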
+template < + typename Shape_, + int Threads, + int ElementsPerAccess = 1 +> +struct PitchLinearStripminedThreadMap { + + /// Tensor coordinate + using TensorCoord = layout::PitchLinearCoord; + + /// Tile shape + using Shape = Shape_; + + /// Number of threads total + static int const kThreads = Threads; + + /// Extract vector length from Layout + static int const kElementsPerAccess = ElementsPerAccess; + + /// Shape of access by each thread + using ThreadAccessShape = layout::PitchLinearShape; + + /// Internal implementation details + struct Detail { + + static_assert(!(Shape::kContiguous % kElementsPerAccess), ""); + + /// Shape of the tile in units of vectors + using ShapeVec = layout::PitchLinearShape< + Shape::kContiguous / kElementsPerAccess, + Shape::kStrided + >; + + static_assert((Threads < ShapeVec::kContiguous && !(ShapeVec::kContiguous % kThreads)) || + (!(kThreads % ShapeVec::kContiguous)), + "Shape must be divisible by number of iterations of each thread."); + }; + + /// Number of iterations by each thread + using Iterations = typename platform::conditional< + Threads >= Detail::ShapeVec::kContiguous, + layout::PitchLinearShape< + 1, + // Redo the comparison here to work around divide by zero compiler + // error. The compiler evaluates both path of platform::conditional. + (Threads >= Detail::ShapeVec::kContiguous + ? (Detail::ShapeVec::kStrided + (kThreads / Detail::ShapeVec::kContiguous - 1)) / + (kThreads / Detail::ShapeVec::kContiguous) + : 0)>, + layout::PitchLinearShape>::type; + + + /// Interval between accesses along each dimension of the tensor's logical coordinate space + /// (in units of Elements) + using Delta = typename platform::conditional< + Threads >= Detail::ShapeVec::kContiguous, + layout::PitchLinearShape< + 1, + kThreads / Detail::ShapeVec::kContiguous + >, + layout::PitchLinearShape< + kThreads * kElementsPerAccess, + 1 + > + >::type; + + /// Shape of the tile in units of vectors + using StorageShape = typename platform::conditional< + Threads >= Detail::ShapeVec::kContiguous, + layout::PitchLinearShape, + layout::PitchLinearShape>::type; + + /// Maps thread ID to a coordinate offset within the tensor's logical coordinate space + /// (in units of Elements) + CUTLASS_HOST_DEVICE + static TensorCoord initial_offset(int thread_id) { + return TensorCoord( + (thread_id % Detail::ShapeVec::kContiguous) * kElementsPerAccess, + thread_id / Detail::ShapeVec::kContiguous); + } +}; + +/// This ThreadMap is used by GEMV +template < + typename Shape, + int Threads, + int ElementsPerAccess = 1 +> +struct PitchLinearTilePolicyStripminedThreadContiguous +{ + static_assert((Shape::kContiguous % (Threads * ElementsPerAccess)) == 0, + "Contiguous shape must divide number of threads"); + + using TensorCoord = layout::PitchLinearCoord; + + static int const kThreads = Threads; + static int const kElementsPerAccess = ElementsPerAccess; + + using Iterations = layout::PitchLinearShape< + Shape::kContiguous / (kThreads * kElementsPerAccess), + Shape::kStrided>; + + using Delta = layout::PitchLinearShape<1, 1>; + + CUTLASS_HOST_DEVICE + static TensorCoord initial_offset(int thread_id) + { + return TensorCoord(thread_id * Iterations::kContiguous * kElementsPerAccess, 0); + } +}; + +template < + typename Shape, + int Threads, + int ElementsPerAccess = 1 +> +struct PitchLinearTilePolicyStripminedThreadStrided +{ + static_assert((Shape::kStrided % Threads == 0), + "Strided shape must divide number of threads"); + + using TensorCoord = layout::PitchLinearCoord; + + static int const 
kThreads = Threads; + static int const kElementsPerAccess = ElementsPerAccess; + + using Iterations = layout::PitchLinearShape< + Shape::kContiguous / kElementsPerAccess, + Shape::kStrided / kThreads>; + + using Delta = layout::PitchLinearShape<1, 1>; + + using ShapeVec = Shape; + + CUTLASS_HOST_DEVICE + static TensorCoord initial_offset(int thread_id) + { + + return TensorCoord(0, thread_id * Iterations::kStrided); + } +}; + + +//////////////////////////////////////////////////////////////////////////////// + +/// Policy defining a warp-raked arrangement in which a shape is partitioned into contiguous +/// elements. +/// +/// This ThreadMap is used by tensor core kernels. +template < + typename Shape_, + int Threads, + typename WarpThreadArrangement_, + int ElementsPerAccess = 1 +> +struct PitchLinearWarpRakedThreadMap { + + /// Tensor coordinate + using TensorCoord = layout::PitchLinearCoord; + + /// Tile shape + using Shape = Shape_; + + /// Number of threads total + static int const kThreads = Threads; + + /// Extract vector length from Layout + static int const kElementsPerAccess = ElementsPerAccess; + + /// Shape of access by each thread + using ThreadAccessShape = layout::PitchLinearShape; + + /// Internal details made public to facilitate introspection + struct Detail { + + /// Fixed arrangement of threads within a warp (units of threads). + using WarpThreadArrangement = WarpThreadArrangement_; + + /// Number of threads per warp + static int const kWarpSize = WarpThreadArrangement::kCount; + + /// Number of participating warps + static int const kWarpCount = kThreads / kWarpSize; + + static_assert( + !(Shape::kContiguous % kElementsPerAccess), + "Shape must be divisible by vector length."); + + /// Compute the 'shape' of the overall tile in units of vectors + using ShapeInAccesses = layout::PitchLinearShape< + Shape::kContiguous / kElementsPerAccess, + Shape::kStrided + >; + + static_assert( + !(ShapeInAccesses::kContiguous % WarpThreadArrangement::kContiguous), + "ShapeInAccesses must be divisible by WarpThreadArrangement."); + + static_assert( + !(ShapeInAccesses::kStrided % WarpThreadArrangement::kStrided), + "ShapeInAccesses must be divisible by WarpThreadArrangement."); + + // compute number of warp-level accesses total + using WarpAccessIterations = layout::PitchLinearShape< + ShapeInAccesses::kContiguous / WarpThreadArrangement::kContiguous, + ShapeInAccesses::kStrided / WarpThreadArrangement::kStrided + >; + + // Divide it into the number of warps, first partitioning the strided dimension then the + // contiguous. + static int const kWarpsStrided = + (WarpAccessIterations::kStrided >= kWarpCount + ? kWarpCount + : WarpAccessIterations::kStrided); + + static int const kWarpsContiguous = + (kWarpCount > WarpAccessIterations::kStrided + ? 
kWarpCount / kWarpsStrided + : 1); + + /// Arrangement of warps within a threadblock-scoped tile + using WarpArrangement = layout::PitchLinearShape< + kWarpsContiguous, kWarpsStrided + >; + }; + + ///< Iterations along each dimension (concept: PitchLinearShape) + using Iterations = layout::PitchLinearShape< + Detail::WarpAccessIterations::kContiguous / Detail::kWarpsContiguous, + Detail::WarpAccessIterations::kStrided / Detail::kWarpsStrided + >; + + static_assert(Iterations::kCount, + "Number of iterations must be non-zero"); + + ///< Delta betweeen accesses (units of elements, concept: PitchLinearShape) + using Delta = layout::PitchLinearShape< + Detail::WarpThreadArrangement::kContiguous * kElementsPerAccess, + Detail::WarpThreadArrangement::kStrided + >; + + /// Maps thread ID to a coordinate offset within the tensor's logical coordinate space + CUTLASS_HOST_DEVICE + static TensorCoord initial_offset(int thread_id) { + + int warp_id = (thread_id / Detail::kWarpSize); + int lane_id = (thread_id % Detail::kWarpSize); + + // + // compute warp-level offset + // + + // This is the shape of the entire area covered by a warp's memory access (in units of vectors) + layout::PitchLinearCoord warp_footprint{ + Detail::WarpThreadArrangement::kContiguous * Iterations::kContiguous, + Detail::WarpThreadArrangement::kStrided * Iterations::kStrided + }; + + // This is the offset of a specific warp (in units of vectors) + layout::PitchLinearCoord warp_offset{ + (warp_id % Detail::kWarpsContiguous), + (warp_id / Detail::kWarpsContiguous) + }; + + // This is the offset of a specific thread within a warp (units of vectors) + layout::PitchLinearCoord thread_offset_in_warp{ + lane_id % Detail::WarpThreadArrangement::kContiguous, + lane_id / Detail::WarpThreadArrangement::kContiguous + }; + + // This is the offset of a thread within a threadblock tile (units of vectors) + layout::PitchLinearCoord thread_offset_in_threadblock_tile_vec = + warp_footprint * warp_offset + thread_offset_in_warp; + + // This is the offset of a thread within a threadblock tile (units of elements) + layout::PitchLinearCoord thread_offset_in_threadblock_tile_base{ + thread_offset_in_threadblock_tile_vec.contiguous() * kElementsPerAccess, + thread_offset_in_threadblock_tile_vec.strided() + }; + + return thread_offset_in_threadblock_tile_base; + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Policy defining a warp-raked arrangement in which a shape is partitioned into contiguous +/// elements. Warps are arranged based on a stride. +/// +/// This ThreadMap is used by tensor core kernels for NCxHWx layout. 
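+///
+/// Unlike PitchLinearWarpRakedThreadMap above, warps are assigned to the contiguous
+/// dimension first and only spill into the strided dimension once it is exhausted.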
+template < + typename Shape_, + int Threads, + typename WarpThreadArrangement_, + int ElementsPerAccess = 1 +> +struct PitchLinearStridedWarpRakedThreadMap { + + /// Tensor coordinate + using TensorCoord = layout::PitchLinearCoord; + + /// Tile shape + using Shape = Shape_; + + /// Number of threads total + static int const kThreads = Threads; + + using WarpThreadArrangement = WarpThreadArrangement_; + + /// Extract vector length from Layout + static int const kElementsPerAccess = ElementsPerAccess; + + /// Base ThreadMap + using BaseThreadMap = PitchLinearWarpRakedThreadMap< + Shape, + kThreads, + WarpThreadArrangement, + kElementsPerAccess + >; + + /// Shape of access by each thread + using ThreadAccessShape = typename BaseThreadMap::ThreadAccessShape; + + + struct Detail { + + using WarpThreadArrangement = WarpThreadArrangement_; + + using WarpAccessIterations = typename BaseThreadMap::Detail::WarpAccessIterations; + + static int const kWarpSize = BaseThreadMap::Detail::kWarpSize; + + static int const kWarpCount = BaseThreadMap::Detail::kWarpCount; + + using ShapeInAccesses = typename BaseThreadMap::Detail::ShapeInAccesses; + + // Divide it into the number of warps, first partitioning the contiguous dimension then the + // stride. + static int const kWarpsContiguous = + (WarpAccessIterations::kContiguous >= kWarpCount + ? kWarpCount + : WarpAccessIterations::kContiguous); + + static int const kWarpsStrided = + (kWarpCount > WarpAccessIterations::kContiguous + ? kWarpCount / kWarpsContiguous + : 1); + + /// Arrangement of warps within a threadblock-scoped tile + using WarpArrangement = layout::PitchLinearShape< + kWarpsContiguous, kWarpsStrided + >; + + }; + + ///< Iterations along each dimension (concept: PitchLinearShape) + using Iterations = layout::PitchLinearShape< + Detail::WarpAccessIterations::kContiguous / Detail::kWarpsContiguous, + Detail::WarpAccessIterations::kStrided / Detail::kWarpsStrided + >; + + static_assert(Iterations::kCount, + "Number of iterations must be non-zero"); + + ///< Delta betweeen accesses (units of elements, concept: PitchLinearShape) + using Delta = typename BaseThreadMap::Delta; + + /// Maps thread ID to a coordinate offset within the tensor's logical coordinate space + CUTLASS_HOST_DEVICE + static TensorCoord initial_offset(int thread_id) { + + int warp_id = (thread_id / Detail::kWarpSize); + int lane_id = (thread_id % Detail::kWarpSize); + + // + // compute warp-level offset + // + + // This is the shape of the entire area covered by a warp's memory access (in units of vectors) + layout::PitchLinearCoord warp_footprint{ + Detail::WarpThreadArrangement::kContiguous * Iterations::kContiguous, + Detail::WarpThreadArrangement::kStrided * Iterations::kStrided + }; + + // This is the offset of a specific warp (in units of vectors) + layout::PitchLinearCoord warp_offset{ + (warp_id % Detail::kWarpsContiguous), + (warp_id / Detail::kWarpsContiguous) + }; + + // This is the offset of a specific thread within a warp (units of vectors) + layout::PitchLinearCoord thread_offset_in_warp{ + lane_id % Detail::WarpThreadArrangement::kContiguous, + lane_id / Detail::WarpThreadArrangement::kContiguous + }; + + // This is the offset of a thread within a threadblock tile (units of vectors) + layout::PitchLinearCoord thread_offset_in_threadblock_tile_vec = + warp_footprint * warp_offset + thread_offset_in_warp; + + // This is the offset of a thread within a threadblock tile (units of elements) + layout::PitchLinearCoord thread_offset_in_threadblock_tile_base{ + 
thread_offset_in_threadblock_tile_vec.contiguous() * kElementsPerAccess, + thread_offset_in_threadblock_tile_vec.strided() + }; + + return thread_offset_in_threadblock_tile_base; + } + + +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Transpose the existing ThreadMap. For example, interleaved layout is like +/// congruous in the global memory and crosswise in the shared memory. We need +/// to transpose the coordinates between two. + +template +struct TransposePitchLinearThreadMap { + /// Underlying ThreadMap + using ThreadMap = ThreadMap_; + + /// Tensor coordinate + using TensorCoord = typename ThreadMap::TensorCoord; + + /// Tile shape + using Shape = typename ThreadMap::Shape; + + /// Number of threads total + static int const kThreads = ThreadMap::kThreads; + + /// Extract vector length from Layout + static int const kElementsPerAccess = ThreadMap::kElementsPerAccess; + + /// Shape of access by each thread + using ThreadAccessShape = layout::PitchLinearShape; + + /// Internal details made public to facilitate introspection + struct Detail { + /// Fixed arrangement of threads within a warp (units of threads). + using WarpThreadArrangement = WarpThreadArrangement_; + + /// Number of threads per warp + static int const kWarpSize = WarpThreadArrangement::kCount; + + /// Number of participating warps + static int const kWarpCount = kThreads / kWarpSize; + + static_assert(!(Shape::kContiguous % kElementsPerAccess), + "Shape must be divisible by vector length."); + + /// Arrangement of warps within a threadblock-scoped tile + using WarpArrangement = + layout::PitchLinearShape; + }; + + ///< Iterations along each dimension (concept: PitchLinearShape) + using Iterations = + layout::PitchLinearShape; + + static_assert(Iterations::kContiguous == 1, + "Contiguous iteration has to be one to reuse the same shared store function with those that don't need transpose"); + + static_assert(Iterations::kCount, "Number of iterations must be non-zero"); + + ///< Delta betweeen accesses (units of elements, concept: PitchLinearShape) + using Delta = + layout::PitchLinearShape; + + /// Maps thread ID to a coordinate offset within the tensor's logical + /// coordinate space Note this is slightly different from the one of + /// PitchLinearWarpRakedThreadMap. + CUTLASS_HOST_DEVICE + static TensorCoord initial_offset(int thread_id) { + + int warp_id = (thread_id / Detail::kWarpSize); + int lane_id = (thread_id % Detail::kWarpSize); + + // + // compute warp-level offset + // + + // This is the shape of the entire area covered by a warp's memory access + // (in units of vectors) + layout::PitchLinearCoord warp_footprint{ + Detail::WarpThreadArrangement::kContiguous * Iterations::kContiguous, + Detail::WarpThreadArrangement::kStrided * Iterations::kStrided}; + + // This is the offset of a specific warp (in units of vectors) + // Note the order of / and %. Also the 2nd operand is kStrided. 
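+    // Consecutive warp indices therefore step through the strided dimension first; this is
+    // the transpose of the warp-raked maps above, which use
+    // (warp_id % kWarpsContiguous, warp_id / kWarpsContiguous).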
+ layout::PitchLinearCoord warp_offset{ + (warp_id / Detail::WarpArrangement::kStrided), + (warp_id % Detail::WarpArrangement::kStrided)}; + + // This is the offset of a specific thread within a warp (units of vectors) + layout::PitchLinearCoord thread_offset_in_warp{ + lane_id % Detail::WarpThreadArrangement::kContiguous, + lane_id / Detail::WarpThreadArrangement::kContiguous}; + + // This is the offset of a thread within a threadblock tile (units of + // vectors) + layout::PitchLinearCoord thread_offset_in_threadblock_tile_vec = + warp_footprint * warp_offset + thread_offset_in_warp; + + // This is the offset of a thread within a threadblock tile (units of + // elements) + layout::PitchLinearCoord thread_offset_in_threadblock_tile_base{ + thread_offset_in_threadblock_tile_vec.contiguous() * kElementsPerAccess, + thread_offset_in_threadblock_tile_vec.strided()}; + + return thread_offset_in_threadblock_tile_base; + } +}; + +template +struct TransposePitchLinearThreadMapSimt { + /// Underlying ThreadMap + using ThreadMap = ThreadMap_; + + /// Tensor coordinate + using TensorCoord = typename ThreadMap::TensorCoord; + + /// Tile shape + using Shape = typename ThreadMap::Shape; + + /// Number of threads total + static int const kThreads = ThreadMap::kThreads; + + /// Extract vector length from Layout + static int const kElementsPerAccess = ThreadMap::kElementsPerAccess; + + static_assert(kElementsPerAccess == 1 , "Simt transpose requires elements per access to be 1"); + ///< Iterations along each dimension (concept: PitchLinearShape) + using Iterations = + layout::PitchLinearShape; + + static_assert(Iterations::kCount, "Number of iterations must be non-zero"); + + static_assert(Iterations::kStrided == 1, + "Strided iteration has to be one to reuse the same shared store function with those that don't need transpose"); + + /// Shape of access by each thread + using ThreadAccessShape = typename ThreadMap::ThreadAccessShape; + + ///< Delta betweeen accesses (units of elements, concept: PitchLinearShape) + using Delta = + layout::PitchLinearShape; + + + /// Maps thread ID to a coordinate offset within the tensor's logical + /// coordinate space Note this is slightly different from the one of + /// PitchLinearWarpRakedThreadMap. + CUTLASS_HOST_DEVICE + static TensorCoord initial_offset(int thread_id) { + + TensorCoord coord = ThreadMap::initial_offset(thread_id); + + return TensorCoord( + coord.strided(), + coord.contiguous() + ); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + + +/// Policy defining a warp-striped arrangement. This partitions a tile into vectorized memory +/// accesses performed by each warp then distributes warps across them. Warps are striped in the +/// strided dimension and raked across the contiguous dimension. +template < + typename Shape_, /// Overall shape to partition in units of elements + int Threads, /// Number of partiticipation threads + typename WarpThreadArrangement_, /// Describes the shape of one memory access per warp + int ElementsPerAccess = 1 /// Number of elements accessed by each thread per memory operation (i.e. 
vector size) +> +struct PitchLinearWarpStripedThreadMap { + + /// Tensor coordinate + using TensorCoord = layout::PitchLinearCoord; + + /// Tile shape + using Shape = Shape_; + + /// Number of threads total + static int const kThreads = Threads; + + /// Extract vector length from Layout + static int const kElementsPerAccess = ElementsPerAccess; + + /// Shape of access by each thread + using ThreadAccessShape = layout::PitchLinearShape; + + /// Internal details made public to facilitate introspection + struct Detail { + + /// Fixed arrangement of threads within a warp (units of threads). + using WarpThreadArrangement = WarpThreadArrangement_; + + /// Number of threads per warp + static int const kWarpSize = WarpThreadArrangement::kCount; + + /// Number of participating warps + static int const kWarpCount = kThreads / kWarpSize; + + static_assert( + !(Shape::kContiguous % kElementsPerAccess), + "Shape must be divisible by vector length."); + + /// Compute the 'shape' of the overall tile in units of vectors + using ShapeInAccesses = layout::PitchLinearShape< + Shape::kContiguous / kElementsPerAccess, + Shape::kStrided + >; + + // compute number of warp-level accesses total + using WarpAccessIterations = layout::PitchLinearShape< + ShapeInAccesses::kContiguous / WarpThreadArrangement::kContiguous, + ShapeInAccesses::kStrided / WarpThreadArrangement::kStrided + >; + + // Divide it into the number of warps, first partitioning the strided dimension then the + // contiguous. + static int const kWarpsStrided = + (WarpAccessIterations::kStrided >= kWarpCount + ? kWarpCount : (kWarpCount / WarpAccessIterations::kStrided)); + + static int const kWarpsContiguous = + (kWarpCount > WarpAccessIterations::kStrided ? + WarpAccessIterations::kContiguous / kWarpsStrided : 1); + + /// Arrangement of warps within a threadblock-scoped tile + using WarpArrangement = layout::PitchLinearShape< + kWarpsContiguous, kWarpsStrided + >; + }; + + ///< Iterations along each dimension (concept: PitchLinearShape) + using Iterations = layout::PitchLinearShape< + Detail::WarpAccessIterations::kContiguous / Detail::kWarpsContiguous, + Detail::WarpAccessIterations::kStrided / Detail::kWarpsStrided + >; + + static_assert(Iterations::kCount, + "Number of iterations must be non-zero"); + + ///< Delta betweeen accesses (units of elements, concept: PitchLinearShape) + using Delta = layout::PitchLinearShape< + Detail::WarpThreadArrangement::kContiguous * kElementsPerAccess, + Detail::WarpThreadArrangement::kStrided * Detail::WarpArrangement::kStrided + >; + + /// Maps thread ID to a coordinate offset within the tensor's logical coordinate space + CUTLASS_HOST_DEVICE + static TensorCoord initial_offset(int thread_id) { + + int warp_id = (thread_id / Detail::kWarpSize); + int lane_id = (thread_id % Detail::kWarpSize); + + // + // compute warp-level offset + // + + // This is the shape of the entire area covered by a warp's memory access (in units of vectors) + layout::PitchLinearCoord warp_footprint{ + Detail::WarpThreadArrangement::kContiguous * Iterations::kContiguous, + Detail::WarpThreadArrangement::kStrided + }; + + // This is the offset of a specific warp (in units of vectors) + layout::PitchLinearCoord warp_offset{ + (warp_id % Detail::kWarpsContiguous), + (warp_id / Detail::kWarpsContiguous) + }; + + // This is the offset of a specific thread within a warp (units of vectors) + layout::PitchLinearCoord thread_offset_in_warp{ + lane_id % Detail::WarpThreadArrangement::kContiguous, + lane_id / 
Detail::WarpThreadArrangement::kContiguous + }; + + // This is the offset of a thread within a threadblock tile (units of vectors) + layout::PitchLinearCoord thread_offset_in_threadblock_tile_vec = + warp_footprint * warp_offset + thread_offset_in_warp; + + // This is the offset of a thread within a threadblock tile (units of elements) + layout::PitchLinearCoord thread_offset_in_threadblock_tile_base{ + thread_offset_in_threadblock_tile_vec.contiguous() * kElementsPerAccess, + thread_offset_in_threadblock_tile_vec.strided() + }; + + return thread_offset_in_threadblock_tile_base; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// +/// Strip-mines a pitch-linear tile among a given number of threads, first along the contiguous +/// dimension then along the strided dimension, while each thread access a 2D thread-tile. +/// +/// The tile must be divisible by the thread count such that all threads may execute the same +/// number of iterations with the same delta to exhaustively cover the tile. +/// +/// This class satisfies the "RegularThreadMapping" concept. +template < + typename Shape_, + int Threads, + typename ThreadTileShape +> +struct PitchLinear2DThreadTileStripminedThreadMap; + + +template < + typename Shape_, + int Threads +> +struct PitchLinear2DThreadTileStripminedThreadMap >{ + + /// Tensor coordinate + using TensorCoord = layout::PitchLinearCoord; + + /// Tile shape + using Shape = Shape_; + + /// Access Shape of each thread + using ThreadAccessShape = cutlass::layout::PitchLinearShape<4, 4>; + //using ThreadAccessShape = ThreadTileShape; + + /// Number of threads total + static int const kThreads = Threads; + + /// Extract length of each access from Layout + static int const kElementsPerAccess = ThreadAccessShape::kContiguous; + + static_assert(!(kElementsPerAccess % 4) , "kElementsPerAccess, needs to be multiple of 4 (32bits)"); + + /// Internal implementation details + struct Detail { + + static_assert(!(ThreadAccessShape::kContiguous % 4), "ThreadAccessShape, needs to be multiple of 4"); + + static_assert(!(Shape::kContiguous % ThreadAccessShape::kContiguous), ""); + + static_assert(!((Shape::kContiguous * Shape::kStrided) % (kThreads * ThreadAccessShape::kCount)), + "Shape must be divisible thread count * accesses per thread."); + + /// Shape of the tile in units of vectors + using ShapeVec = layout::PitchLinearShape< + Shape::kContiguous / ThreadAccessShape::kContiguous, + Shape::kStrided / ThreadAccessShape::kStrided + >; + + static_assert( + (Threads < ShapeVec::kContiguous && !(ShapeVec::kContiguous % kThreads)) || + (!(kThreads % ShapeVec::kContiguous) && !(ShapeVec::kStrided % (kThreads / ShapeVec::kContiguous))), + "Shape must be divisible by number of iterations of each thread." + ); + }; + + /// Number of iterations by each thread + using Iterations = typename platform::conditional< + Threads >= Detail::ShapeVec::kContiguous, + layout::PitchLinearShape< + 1, + // Redo the comparison here to work around divide by zero compiler + // error. The compiler evaluates both path of platform::conditional. + (Threads >= Detail::ShapeVec::kContiguous + ? 
Detail::ShapeVec::kStrided / + (kThreads / Detail::ShapeVec::kContiguous) + : 0)>, + layout::PitchLinearShape>::type; + + /// Interval between accesses along each dimension of the tensor's logical coordinate space + /// (in units of Elements) + using Delta = typename platform::conditional< + Threads >= Detail::ShapeVec::kContiguous, + layout::PitchLinearShape< + Shape::kContiguous, + kThreads * ThreadAccessShape::kStrided / Detail::ShapeVec::kContiguous + >, + layout::PitchLinearShape< + kThreads * ThreadAccessShape::kContiguous, + 1 + > + >::type; + + /// Maps thread ID to a coordinate offset within the tensor's logical coordinate space + /// (in units of Elements) + CUTLASS_HOST_DEVICE + static TensorCoord initial_offset(int thread_id) { + + return TensorCoord( + (thread_id % Detail::ShapeVec::kContiguous) * ThreadAccessShape::kContiguous, + (thread_id / Detail::ShapeVec::kContiguous) * ThreadAccessShape::kStrided); + } +}; + +/// Thread Mapping a 2D threadtiled mapping as a transposed Pitchlinear2DThreadTile mapping +template +struct TransposePitchLinearThreadMap2DThreadTile { + /// Underlying ThreadMap + using ThreadMap = ThreadMap_; + + /// Tensor coordinate + using TensorCoord = typename ThreadMap::TensorCoord; + + /// Tile shape + using Shape = typename ThreadMap::Shape; + + /// Number of threads total + static int const kThreads = ThreadMap::kThreads; + + /// Extract vector length from Layout + static int const kElementsPerAccess = ThreadMap::kElementsPerAccess; + + + static_assert(kElementsPerAccess > 1 , "Simt transpose requires elements per access to be 1"); + ///< Iterations along each dimension (concept: PitchLinearShape) + using Iterations = + layout::PitchLinearShape; + + static_assert(Iterations::kCount, "Number of iterations must be non-zero"); + + /// Shape of access by each thread + using ThreadAccessShape = typename ThreadMap::ThreadAccessShape; + + ///< Delta betweeen accesses (units of elements, concept: PitchLinearShape) + using Delta = + layout::PitchLinearShape; + + + /// Maps thread ID to a coordinate offset within the tensor's logical + /// coordinate space Note this is slightly different from the one of + /// PitchLinearWarpRakedThreadMap. + CUTLASS_HOST_DEVICE + static TensorCoord initial_offset(int thread_id) { + + TensorCoord coord = ThreadMap::initial_offset(thread_id); + return TensorCoord( + coord.strided(), + coord.contiguous() + ); + } +}; + + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace transform +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/thread/transpose.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/thread/transpose.h new file mode 100644 index 0000000000000000000000000000000000000000..b62b6bfe5c6c30ac1d4fbbc82b46768a58e101ae --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/thread/transpose.h @@ -0,0 +1,107 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief Basic copy routines for tensor views +*/ + +#pragma once + +namespace cutlass { +namespace transform { +namespace thread { + +/// Transforms a fragment by doing a transpose +template < + int ElementCount, + typename TransposeShape, + typename Element +> struct Transpose; + +/// Specialization for int8_t 4x4 transpose +template +struct Transpose , int8_t> { + + static const int kElementCount = ElementCount_; + using TransposeShape = layout::PitchLinearShape<4,4>; + using Element = int8_t; + using Fragment = cutlass::Array; + + static_assert(!(kElementCount % TransposeShape::kCount), "Shape needs to be multiple of 16 elements to do a 4x4 transpose"); + + CUTLASS_DEVICE + void transform(Fragment& dst, Fragment& src) { + + // Expose src/dst as int arrays. 
+ int* src_int = reinterpret_cast(&src); + int* dst_int = reinterpret_cast(&dst); + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kElementCount / TransposeShape::kCount; i++){ + + int const i0 = 4 * i + 0; + int const i1 = 4 * i + 1; + int const i2 = 4 * i + 2; + int const i3 = 4 * i + 3; + + int a0 = src_int[i0]; + int a1 = src_int[i1]; + int a2 = src_int[i2]; + int a3 = src_int[i3]; + + int b0, b1, b2, b3, c0; + b0 = __byte_perm(a0, a1, 0x0040); + c0 = __byte_perm(a2, a3, 0x0040); + b0 = __byte_perm(b0, c0, 0x5410); + + b1 = __byte_perm(a0, a1, 0x0051); + c0 = __byte_perm(a2, a3, 0x0051); + b1 = __byte_perm(b1, c0, 0x5410); + + b2 = __byte_perm(a0, a1, 0x0062); + c0 = __byte_perm(a2, a3, 0x0062); + b2 = __byte_perm(b2, c0, 0x5410); + + b3 = __byte_perm(a0, a1, 0x0073); + c0 = __byte_perm(a2, a3, 0x0073); + b3 = __byte_perm(b3, c0, 0x5410); + + dst_int[i0] = b0; + dst_int[i1] = b1; + dst_int[i2] = b2; + dst_int[i3] = b3; + } + } +}; + +} // namespace thread +} // namespace layout +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/thread/unary_op.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/thread/unary_op.h new file mode 100644 index 0000000000000000000000000000000000000000..c50e75b34656e2a6821473c8e64a209dcbcbda0c --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/thread/unary_op.h @@ -0,0 +1,105 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/complex.h" + +namespace cutlass { +namespace transform { +namespace thread { + +namespace UnaryTransform { + struct Identity; ///< None (i.e., identity) + struct Conjugate; ///< Complex conjugate +} + +/// Element-wise unary operator that transforms one element of a fragment at a time +template< + typename FragmentIn, ///< Input Fragment + typename FragmentOut,///< Output Fragment + typename Transform> ///< Unary transform operator +class UnaryOp +{ + public: + CUTLASS_DEVICE + static FragmentOut execute(FragmentIn &in) + { + static_assert(FragmentIn::kElements == FragmentOut::kElements, "Number of elements must match."); + static_assert(platform::is_same::value || + platform::is_same::value, + "Unary Operator not supported."); + + FragmentOut out; + if (platform::is_same::value ) + { + CUTLASS_PRAGMA_UNROLL + for (int i=0; i < FragmentIn::kElements; ++i){ + out[i] = static_cast(in[i]); + } + } + else if (platform::is_same::value ) + { + for (int i=0; i < FragmentIn::kElements; ++i){ + out[i] = conj(static_cast(in[i])); + } + } + return out; + } +}; + +template +class UnaryOp +{ + public: + CUTLASS_DEVICE + static FragmentIn execute(FragmentIn &in) + { + static_assert(platform::is_same::value || + platform::is_same::value, + "Unary Operator not supported."); + + if (platform::is_same::value ) + { + return in; + } + else if (platform::is_same::value ) + { + for(int i=0; i < FragmentIn::kElements; ++i){ + in[i] = conj(in[i]); + } + } + return in; + } + }; + } + } +} diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/ell_iterator.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/ell_iterator.h new file mode 100644 index 0000000000000000000000000000000000000000..05781236b77789e36233fb22416702e104dceaed --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/ell_iterator.h @@ -0,0 +1,199 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Ell iterator for matrix of indices (ellColInd matrix) +*/ + +#pragma once + +namespace cutlass { +namespace transform { +namespace threadblock { + +namespace ell{ + +constexpr unsigned int SmemPow = 8; +constexpr unsigned int SmemStages = 2; +constexpr unsigned int SmemSize = 1 << SmemPow; +constexpr unsigned int SmemMask = (SmemSize*SmemStages-1); + +class SharedStorage{ + public: + Array array; +}; + +class Iterator{ + public: + using Layout = layout::PitchLinear; + using LongIndex = typename Layout::LongIndex; + + private: + const int *gmem_col_idx_; + int *smem_col_idx_; + const int block_size_; + const int base_idx_; + const int k_shape_; + const int ell_increment_; + const int array_length_; + int col_idx_base_; + int residue_; + int counter_; + + int pow2_; + int residue_shape_; + + int smem_offset_; + int smem_stage_; + int gmem_offset_; + + int lane_; + + bool is_pow2_; + bool is_residue_tile_; + + public: + CUTLASS_DEVICE + void load_ell_indices(){ + for(int i=threadIdx.x; i= 0) ? gmem_col_idx : -1; + } + gmem_offset_ += SmemSize; + smem_stage_ ^= 1; + } + + CUTLASS_DEVICE + Iterator( + SharedStorage& shared_storage_base, + const int* col_idx, + const int& block_size, + const int& base_idx, + const int k_shape, + const int& problem_size_k, + const int& ell_stride, + const int& thread_idx) + : residue_(0), + counter_(0), + smem_offset_(0), + smem_stage_(0), + gmem_offset_(0), + block_size_(block_size), + base_idx_(base_idx), + k_shape_(k_shape), + ell_increment_(ell_stride * block_size), + array_length_((problem_size_k + block_size_ - 1) / block_size_), + residue_shape_(problem_size_k % k_shape_), + is_residue_tile_(residue_shape_ != 0), + smem_col_idx_(reinterpret_cast(&shared_storage_base.array)), + gmem_col_idx_(const_cast(col_idx)), + lane_(thread_idx % 32) { + + load_ell_indices(); + __syncthreads(); + + is_pow2_ = ((block_size_ & (block_size_ - 1)) == 0); + if( is_pow2_ && k_shape <= block_size_ ) lane_ = 0; + + col_idx_base_ = smem_col_idx_[(smem_offset_ + lane_) & SmemMask] * ell_increment_; + + pow2_ = 0; + while(block_size_ >> (pow2_ + 1)) ++pow2_; + } + + CUTLASS_DEVICE + int get_blocksize(){ + return block_size_; + } + + CUTLASS_DEVICE + Iterator &operator++(){ + if(is_residue_tile_){ + residue_ += residue_shape_; + is_residue_tile_ = false; + } else { + residue_ += k_shape_; + } + + if(residue_ < block_size_){ + return *this; + } + + if((array_length_ > SmemSize) && (((smem_offset_ >> SmemPow) & 1) != smem_stage_)) + load_ell_indices(); + + if(residue_ == block_size_){ + ++smem_offset_; + counter_ += ell_increment_; + residue_ = 0; + col_idx_base_ = smem_col_idx_[(smem_offset_ + lane_) & SmemMask] * ell_increment_ - counter_; + return *this; + } + + if(is_pow2_){ + smem_offset_ += residue_ >> pow2_; + counter_ += (residue_ >> pow2_) * ell_increment_; + residue_ = residue_ & ((1 << pow2_) - 1); + } + else { + 
smem_offset_ += residue_ / block_size_; + counter_ += (residue_ / block_size_) * ell_increment_; + residue_ %= block_size_; + } + + col_idx_base_ = smem_col_idx_[(smem_offset_ + lane_) & SmemMask] * ell_increment_ - counter_; + + return *this; + } + + CUTLASS_DEVICE + LongIndex get_offset(const int& idx) { + int num_jump_tiles; + if(is_pow2_) + num_jump_tiles = (idx + residue_) >> pow2_; + else + num_jump_tiles = (idx + residue_) / block_size_; + + int tmp = __shfl_sync(0xffffffff, col_idx_base_, num_jump_tiles); + return tmp - num_jump_tiles * ell_increment_; + } + + CUTLASS_DEVICE + LongIndex get_offset_fast() { + return col_idx_base_; + } +}; + +} +} +} +} diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/ell_predicated_tile_access_iterator.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/ell_predicated_tile_access_iterator.h new file mode 100644 index 0000000000000000000000000000000000000000..9eec17e95e14e07a7fac1d194b29f75f42d0b98c --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/ell_predicated_tile_access_iterator.h @@ -0,0 +1,1350 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief Ell iterator for Blocked-Ell matrix (ellValue matrix) used with EllMmaMultistage +*/ + +#pragma once + +#include "cutlass/array.h" +#include "cutlass/coord.h" +#include "cutlass/cutlass.h" +#include "cutlass/layout/matrix.h" +#include "cutlass/layout/pitch_linear.h" +#include "cutlass/matrix_shape.h" +#include "cutlass/predicate_vector.h" +#include "cutlass/tensor_ref.h" +#include "cutlass/tensor_view.h" + +//////////////////////////////////////////////////////////////////////////////// + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace transform { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +/// EllPredicatedTileAccessIterator +/// +template +class EllPredicatedTileAccessIterator; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of EllPredicatedTileAccessIterator for pitch-linear data. +/// +template +class EllPredicatedTileAccessIterator { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::PitchLinear; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + using AccessType = AccessType_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element *; + using NonConstPointer = typename platform::remove_const::type *; + + static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements; + + static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements), + "Vectors implied by the thread map must be divisible by the access type."); + + static int const kPredicatesPerByte = 4; + static int const kPredicatesPerWord = 4 * kPredicatesPerByte; + + static int const kPredicateCount = ThreadMap::Iterations::kCount * kAccessesPerVector; + + /// Number of 32b words containing predicates + static int const kPredicateByteCount = + (kPredicateCount + kPredicatesPerByte - 1) / kPredicatesPerByte; + static int const kPredicateWordCount = (kPredicateByteCount + 3) / 4; + + static unsigned const kPredicateMask = (1u << kPredicatesPerByte) - 1u; + + static_assert(kPredicateWordCount <= 4, "Too many predicates."); + + /// Predicate vector stores mask to guard accesses + using Mask = Array; + + /// Parameters object is precomputed state and is host-constructible + class Params { + public: + friend EllPredicatedTileAccessIterator; + + private: + /// stride of pitch-linear layout (units of Element) + LongIndex stride_; + /// amount (in byte) to increment pointer to move to next access along + /// strided dimension + LongIndex inc_strided_; + /// amount (in byte) to increment pointer from last access to first access + /// of next tile + LongIndex inc_next_; + /// amount (in byte) to increment pointer from first access of current tile + /// to first access of next tile + LongIndex inc_advance_; + + public: + + // Default ctor + CUTLASS_HOST_DEVICE + Params(): stride_(0), inc_strided_(0), inc_next_(0), inc_advance_(0) { } + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const &layout) : 
stride_(layout.stride(0)) { + inc_strided_ = (LongIndex(stride_) * ThreadMap::Delta::kStrided) * + sizeof_bits::value / 8; + + if (kAdvanceRank) { + // advance along strided dimension + inc_advance_ = + Shape::kStrided * LongIndex(stride_) * sizeof_bits::value / 8; + } else { + // advance along contiguous dimension + inc_advance_ = Shape::kContiguous * sizeof_bits::value / 8; + } + + inc_next_ = inc_advance_ - LongIndex(ThreadMap::Iterations::kStrided - 1) * + ThreadMap::Delta::kStrided * LongIndex(stride_) * + sizeof_bits::value / 8; + }; + }; + + private: + /// Internal pointer type permits fast address arithmetic + using BytePointer = char *; + + private: + // + // Data members + // + + /// Parameters object with precomputed internal state + Params const ¶ms_; + + /// Internal pointer to first access of tile + BytePointer pointer_; + + /// Guard predicates + uint32_t predicates_[kPredicateWordCount]; + + /// Size of tensor + TensorCoord extent_; + + /// Initial offset for each thread + TensorCoord thread_offset_; + + /// Offset to the first steady-state tile + TensorCoord residue_offset_; + + /// Initial offset to define ELL block + TensorCoord ell_offset_; + + /// Used for out-of-order visitation + bool is_residue_tile_; + + /// Iteration along vectors implied by the thread map + int iteration_vector_; + + /// Iteration in the contiguous dimension + int iteration_contiguous_; + + /// Iteration in the strided dimension + int iteration_strided_; + + public: + /// Computes predicates based on internally tracked per-thread offset. + CUTLASS_DEVICE + void compute_predicates_( + /// Extent of the matrix window + TensorCoord extent, + /// optionally, simplify predicate calculation during 'steady state' phase + bool is_steady_state = false) { + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kPredicateWordCount; ++i) { + predicates_[i] = 0u; + } + + CUTLASS_PRAGMA_UNROLL + for (int access_idx = 0; access_idx < ThreadMap::Iterations::kCount * kAccessesPerVector; ++access_idx) { + + int s = access_idx / (ThreadMap::Iterations::kContiguous * kAccessesPerVector); + + int access_residual = access_idx % (ThreadMap::Iterations::kContiguous * kAccessesPerVector); + + int c = access_residual / kAccessesPerVector; + int v = access_residual % kAccessesPerVector; + + TensorCoord iteration_coord(c * ThreadMap::Delta::kContiguous + v * AccessType::kElements, + s * ThreadMap::Delta::kStrided); + + TensorCoord coord = thread_offset_ + iteration_coord; + + bool guard; + + if (is_steady_state) { + if (kAdvanceRank == 0) { + guard = (coord.strided() < extent.strided()); + } else { + guard = (coord.contiguous() < extent.contiguous()); + } + } else { + guard = (coord.strided() < extent.strided() && + coord.contiguous() < extent.contiguous()); + } + + int pred_idx = v + kAccessesPerVector * (c + ThreadMap::Iterations::kContiguous * s); + + int word_idx = pred_idx / kPredicatesPerWord; + int residual = pred_idx % kPredicatesPerWord; + int byte_idx = residual / kPredicatesPerByte; + int bit_idx = residual % kPredicatesPerByte; + + predicates_[word_idx] |= (unsigned(guard) << (byte_idx * 8 + bit_idx)); + + } + + } + + public: + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + EllPredicatedTileAccessIterator( + /// Precomputed parameters object + Params const ¶ms, + /// Pointer to start of tensor + Pointer pointer, + /// Extent of tensor + TensorCoord extent, + /// ID of each participating thread + int thread_id, + /// Initial offset of threadblock 
+ TensorCoord const &threadblock_offset) + : params_(params), + pointer_(reinterpret_cast( + const_cast(pointer))), + extent_(extent), + is_residue_tile_(true) { + + TensorCoord residue_extent; + if (kAdvanceRank) { + + typename TensorCoord::Index residue_size = (extent_[kAdvanceRank] - threadblock_offset.strided()) % Shape::kStrided; + if (!residue_size) { + residue_size = Shape::kStrided; + } + + residue_offset_ = make_Coord(0, residue_size); + residue_extent = make_Coord( + extent_.contiguous(), + min(threadblock_offset.strided() + residue_size, extent_.strided()) + ); + } else { + + typename TensorCoord::Index residue_size = (extent_[kAdvanceRank] - threadblock_offset.contiguous()) % Shape::kContiguous; + if (!residue_size) { + residue_size = Shape::kContiguous; + } + + residue_offset_ = make_Coord(residue_size, 0); + + residue_extent = make_Coord( + min(extent_.contiguous(), threadblock_offset.contiguous() + residue_size), + extent_.strided() + ); + } + + // Per-thread offset in logical coordinates of tensor + ell_offset_ = ThreadMap::initial_offset(thread_id); + thread_offset_ = threadblock_offset + ThreadMap::initial_offset(thread_id); + + // update internal pointers + Layout layout(params_.stride_); + add_pointer_offset(layout(thread_offset_)); + + compute_predicates_(residue_extent, false); + + set_iteration_index(0); + } + + /// Construct a EllPredicatedTileAccessIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + EllPredicatedTileAccessIterator( + /// Precomputed parameters object + Params const ¶ms, + /// Pointer to start of tensor + Pointer pointer, + /// Extent of tensor + TensorCoord extent, + ///< ID of each participating thread + int thread_id) + : EllPredicatedTileAccessIterator(params, pointer, extent, thread_id, + make_Coord(0, 0)) {} + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { + + iteration_vector_ = index % kAccessesPerVector; + int residual_access = index / kAccessesPerVector; + + iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous; + iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous; + + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + pointer_ += sizeof_bits::value * pointer_offset / 8; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_DEVICE + void add_tile_offset( + TensorCoord const &tile_offset) { + if (is_residue_tile_) { + + thread_offset_ += residue_offset_; + + Layout layout(params_.stride_); + add_pointer_offset(layout(residue_offset_)); + + compute_predicates_(extent_, true); + + if (kAdvanceRank) { + pointer_ += params_.inc_advance_ * LongIndex(tile_offset.strided() - 1); + pointer_ += Shape::kContiguous * tile_offset.contiguous(); + } else { + pointer_ += params_.inc_advance_ * LongIndex(tile_offset.contiguous() - 1); + pointer_ += Shape::kStrided * tile_offset.strided(); + } + } else { + if (kAdvanceRank) { + pointer_ += params_.inc_advance_ * LongIndex(tile_offset.strided()); + pointer_ += Shape::kContiguous * tile_offset.contiguous(); + } else { + pointer_ += params_.inc_advance_ * LongIndex(tile_offset.contiguous()); + pointer_ += Shape::kStrided * tile_offset.strided(); + } + } + is_residue_tile_ = false; + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + return reinterpret_cast( + pointer_ + + iteration_contiguous_ * (ThreadMap::Delta::kContiguous * 
sizeof_bits::value) / 8) + iteration_vector_; + } + + /// Returns a k_location + CUTLASS_HOST_DEVICE + int get_k() const { + if(kAdvanceRank){ //strided + return ell_offset_.strided() + iteration_strided_ * ThreadMap::Delta::kStrided; + }else{ + return ell_offset_.contiguous() + iteration_contiguous_ * ThreadMap::Delta::kContiguous + iteration_vector_ * AccessType::kElements; + } + } + + CUTLASS_HOST_DEVICE + int get_stride() const { + if(kAdvanceRank) + return params_.stride_; + else + return 1; + } + + /// Increment and return an instance to self. + CUTLASS_HOST_DEVICE + EllPredicatedTileAccessIterator &operator++() { + + ++iteration_vector_; + if (iteration_vector_ < kAccessesPerVector) { + return *this; + } + + iteration_vector_ = 0; + ++iteration_contiguous_; + + if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) { + return *this; + } + + // Enter here only if (iteration_contiguous_ == + // ThreadMap::Iteration::kContiguous) + iteration_contiguous_ = 0; + ++iteration_strided_; + + if (iteration_strided_ < ThreadMap::Iterations::kStrided) { + pointer_ += params_.inc_strided_; + return *this; + } + + // Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided) + // which means we enter the next tile. + iteration_strided_ = 0; + + // advance to next tile + pointer_ += params_.inc_next_; + + // now return to start tile - if the iterator is subsequently advanced, this + // subtraction as well as the subsequent integer addition are both elided by + // the compiler. + pointer_ -= params_.inc_advance_; + + return *this; + } + + /// Increment and return an instance to self. + CUTLASS_HOST_DEVICE + EllPredicatedTileAccessIterator operator++(int) { + EllPredicatedTileAccessIterator self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kPredicateWordCount; ++i) { + predicates_[i] = enable ? 
0u : predicates_[i]; + } + + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kPredicateWordCount; ++i) { + predicates_[i] = 0xffffffff; + } + + } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const &mask) { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kPredicateWordCount; ++i) { + predicates_[i] = mask[i]; + } + + } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask &mask) { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kPredicateWordCount; ++i) { + mask[i] = predicates_[i]; + } + } + + /// add mask for small tiles in ELL + CUTLASS_DEVICE + void ell_add_mask(int blocksize) { + + Mask mask; + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kPredicateWordCount; ++i) { + mask[i] = 0u; + } + + CUTLASS_PRAGMA_UNROLL + for (int access_idx = 0; access_idx < ThreadMap::Iterations::kCount * kAccessesPerVector; ++access_idx) { + + int s = access_idx / (ThreadMap::Iterations::kContiguous * kAccessesPerVector); + + int access_residual = access_idx % (ThreadMap::Iterations::kContiguous * kAccessesPerVector); + + int c = access_residual / kAccessesPerVector; + int v = access_residual % kAccessesPerVector; + + TensorCoord iteration_coord(c * ThreadMap::Delta::kContiguous + v * AccessType::kElements, + s * ThreadMap::Delta::kStrided); + + TensorCoord coord = ell_offset_ + iteration_coord; + + bool guard; + + if (kAdvanceRank == 0) { + guard = (coord.strided() < blocksize); + } else { + guard = (coord.contiguous() < blocksize); + } + + int pred_idx = v + kAccessesPerVector * (c + ThreadMap::Iterations::kContiguous * s); + + int word_idx = pred_idx / kPredicatesPerWord; + int residual = pred_idx % kPredicatesPerWord; + int byte_idx = residual / kPredicatesPerByte; + int bit_idx = residual % kPredicatesPerByte; + + mask[word_idx] |= (unsigned(guard) << (byte_idx * 8 + bit_idx)); + + } + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kPredicateWordCount; ++i) { + mask[i] &= predicates_[i]; + } + set_mask(mask); + } + + /// Returns whether access is valid or not + CUTLASS_HOST_DEVICE + bool valid() { + + int pred_idx = + iteration_vector_ + kAccessesPerVector * (iteration_contiguous_ + iteration_strided_ * ThreadMap::Iterations::kContiguous); + + int word_idx = pred_idx / kPredicatesPerWord; + int residual = pred_idx % kPredicatesPerWord; + int byte_idx = residual / kPredicatesPerByte; + int bit_idx = residual % kPredicatesPerByte; + + bool pred = (predicates_[word_idx] & (1u << (byte_idx * 8 + bit_idx))) != 0; + return pred; + + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of EllPredicatedTileAccessIterator for pitch-linear data. 
+/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template +class EllPredicatedTileAccessIterator { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::ColumnMajor; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + using AccessType = AccessType_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element *; + using NonConstPointer = typename platform::remove_const::type *; + + using UnderlyingIterator = EllPredicatedTileAccessIterator< + layout::PitchLinearShape, Element, + layout::PitchLinear, (kAdvanceRank == 0 ? 0 : 1), ThreadMap, AccessType>; + + /// Predicate vector stores mask to guard accesses + using Mask = typename UnderlyingIterator::Mask; + + static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector; + + /// Parameters object is precomputed state and is host-constructible + class Params { + private: + friend EllPredicatedTileAccessIterator; + + /// Parameters object + typename UnderlyingIterator::Params params_; + + public: + + /// Default ctor + CUTLASS_HOST_DEVICE + Params() { } + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const &layout) + : params_(layout::PitchLinear(layout.stride(0))){}; + }; + + private: + // + // Data members + // + + /// Underlying pitch-linear tile iterator + UnderlyingIterator iterator_; + + public: + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + EllPredicatedTileAccessIterator( + ///< Precomputed parameters object + Params const ¶ms, + ///< Pointer to start of tensor + Pointer pointer, + ///< Extent of tensor + TensorCoord extent, + ///< ID of each participating thread + int thread_id, + ///< Initial offset of threadblock + TensorCoord const &threadblock_offset) + : iterator_(params.params_, pointer, + layout::PitchLinearCoord(extent.row(), extent.column()), + thread_id, + layout::PitchLinearCoord(threadblock_offset.row(), + threadblock_offset.column())) {} + + /// Construct a EllPredicatedTileAccessIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + EllPredicatedTileAccessIterator( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ) + : EllPredicatedTileAccessIterator(params, pointer, extent, thread_id, + make_Coord(0, 0)) {} + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { iterator_.set_iteration_index(index); } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances an iterator along logical dimensions of matrix in units of whole + /// tiles + CUTLASS_HOST_DEVICE + void add_tile_offset(TensorCoord const &tile_offset) { + iterator_.add_tile_offset({tile_offset.row(), 
tile_offset.column()}); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + return reinterpret_cast(iterator_.get()); + } + + CUTLASS_HOST_DEVICE + int get_k() const { + return iterator_.get_k(); + } + + CUTLASS_HOST_DEVICE + int get_stride() const { + return iterator_.get_stride(); + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + EllPredicatedTileAccessIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + EllPredicatedTileAccessIterator operator++(int) { + EllPredicatedTileAccessIterator self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { iterator_.enable_mask(); } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const &mask) { iterator_.set_mask(mask); } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask &mask) { iterator_.get_mask(mask); } + + /// add mask for small tiles in ELL + CUTLASS_DEVICE + void ell_add_mask(int blocksize) { + iterator_.ell_add_mask(blocksize); + } + + /// Returns whether access is valid or not + CUTLASS_HOST_DEVICE + bool valid() { + return iterator_.valid(); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of EllPredicatedTileAccessIterator for pitch-linear data. +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template +class EllPredicatedTileAccessIterator { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::RowMajor; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + using AccessType = AccessType_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element *; + using NonConstPointer = typename platform::remove_const::type *; + + using UnderlyingIterator = EllPredicatedTileAccessIterator< + layout::PitchLinearShape, Element, + layout::PitchLinear, (kAdvanceRank == 0 ? 
1 : 0), ThreadMap, AccessType>; + + static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector; + + /// Predicate vector stores mask to guard accesses + using Mask = typename UnderlyingIterator::Mask; + + /// Parameters object is precomputed state and is host-constructible + class Params { + private: + friend EllPredicatedTileAccessIterator; + + /// Parameters object + typename UnderlyingIterator::Params params_; + + public: + + /// Default ctor + CUTLASS_HOST_DEVICE + Params() { } + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const &layout) + : params_(layout::PitchLinear(layout.stride(0))){}; + }; + + private: + // + // Data members + // + + /// Underlying pitch-linear tile iterator + UnderlyingIterator iterator_; + + public: + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + EllPredicatedTileAccessIterator( + ///< Precomputed parameters object + Params const ¶ms, + ///< Pointer to start of tensor + Pointer pointer, + ///< Extent of tensor + TensorCoord extent, + ///< ID of each participating thread + int thread_id, + ///< Initial offset of threadblock + TensorCoord const &threadblock_offset) + : iterator_(params.params_, pointer, + layout::PitchLinearCoord(extent.column(), extent.row()), + thread_id, + layout::PitchLinearCoord(threadblock_offset.column(), + threadblock_offset.row())) {} + + /// Construct a EllPredicatedTileAccessIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + EllPredicatedTileAccessIterator( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ) + : EllPredicatedTileAccessIterator(params, pointer, extent, thread_id, + make_Coord(0, 0)) {} + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { iterator_.set_iteration_index(index); } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances an iterator along logical dimensions of matrix in units of whole + /// tiles + CUTLASS_HOST_DEVICE + void add_tile_offset(TensorCoord const &tile_offset) { + iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()}); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + return reinterpret_cast(iterator_.get()); + } + + CUTLASS_HOST_DEVICE + int get_k() const { + return iterator_.get_k(); + } + + CUTLASS_HOST_DEVICE + int get_stride() const { + return iterator_.get_stride(); + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + EllPredicatedTileAccessIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. 
+ CUTLASS_HOST_DEVICE + EllPredicatedTileAccessIterator operator++(int) { + EllPredicatedTileAccessIterator self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { iterator_.enable_mask(); } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const &mask) { iterator_.set_mask(mask); } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask &mask) { iterator_.get_mask(mask); } + + /// add mask for small tiles in ELL + CUTLASS_DEVICE + void ell_add_mask(int blocksize) { + iterator_.ell_add_mask(blocksize); + } + + /// Returns whether access is valid or not + CUTLASS_HOST_DEVICE + bool valid() { + return iterator_.valid(); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of EllPredicatedTileAccessIterator for column-major interleaved data. +/// It is mapped to the congruous layout. +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// + +template +class EllPredicatedTileAccessIterator, + AdvanceRank, ThreadMap_, AccessType_> { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + static int const kInterleavedK = InterleavedK; + using Layout = layout::ColumnMajorInterleaved; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + using AccessType = AccessType_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element *; + using NonConstPointer = typename platform::remove_const::type *; + + using UnderlyingIterator = EllPredicatedTileAccessIterator< + layout::PitchLinearShape, + Element, layout::PitchLinear, (kAdvanceRank == 0 ? 
0 : 1), ThreadMap, + AccessType>; + + static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector; + + /// Predicate vector stores mask to guard accesses + using Mask = typename UnderlyingIterator::Mask; + + /// Parameters object is precomputed state and is host-constructible + class Params { + private: + friend EllPredicatedTileAccessIterator; + + /// Parameters object + typename UnderlyingIterator::Params params_; + + public: + CUTLASS_HOST_DEVICE + Params() {} + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const &layout) + : params_(layout::PitchLinear(layout.stride(0))) {} + }; + + private: + // + // Data members + // + + /// Underlying pitch-linear tile iterator + UnderlyingIterator iterator_; + + public: + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + EllPredicatedTileAccessIterator( + /// Precomputed parameters object + Params const ¶ms, + /// Pointer to start of tensor + Pointer pointer, + /// Extent of tensor + TensorCoord extent, + /// ID of each participating thread + int thread_id, + /// Initial offset of threadblock + TensorCoord const &threadblock_offset) + : iterator_(params.params_, pointer, + layout::PitchLinearCoord(extent.row() * kInterleavedK, + extent.column() / kInterleavedK), + thread_id, + layout::PitchLinearCoord( + threadblock_offset.row() * kInterleavedK, + threadblock_offset.column() / kInterleavedK)) {} + + /// Construct a EllPredicatedTileAccessIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + EllPredicatedTileAccessIterator( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ) + : EllPredicatedTileAccessIterator(params, pointer, extent, thread_id, + make_Coord(0, 0)) {} + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { iterator_.set_iteration_index(index); } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances an iterator along logical dimensions of matrix in units of whole + /// tiles + CUTLASS_HOST_DEVICE + void add_tile_offset(TensorCoord const &tile_offset) { + iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()}); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + return reinterpret_cast(iterator_.get()); + } + + CUTLASS_HOST_DEVICE + int get_k() const { + return iterator_.get_k(); + } + + CUTLASS_HOST_DEVICE + int get_stride() const { + return iterator_.get_stride(); + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + EllPredicatedTileAccessIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. 
+ CUTLASS_HOST_DEVICE + EllPredicatedTileAccessIterator operator++(int) { + EllPredicatedTileAccessIterator self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { iterator_.enable_mask(); } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const &mask) { iterator_.set_mask(mask); } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask &mask) { iterator_.get_mask(mask); } + + /// add mask for small tiles in ELL + CUTLASS_DEVICE + void ell_add_mask(int blocksize) { + iterator_.ell_add_mask(blocksize); + } + + /// Returns whether access is valid or not + CUTLASS_HOST_DEVICE + bool valid() { return iterator_.valid(); } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of EllPredicatedTileAccessIterator for row-major interleaved data. +/// It is mapped to the congruous layout. +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template +class EllPredicatedTileAccessIterator, + AdvanceRank, ThreadMap_, AccessType_> { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + static int const kInterleavedK = InterleavedK; + using Layout = layout::RowMajorInterleaved; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + using AccessType = AccessType_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element *; + using NonConstPointer = typename platform::remove_const::type *; + + using UnderlyingIterator = EllPredicatedTileAccessIterator< + layout::PitchLinearShape, + Element, layout::PitchLinear, (kAdvanceRank == 0 ? 
1 : 0), ThreadMap, + AccessType>; + + + static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector; + + /// Predicate vector stores mask to guard accesses + using Mask = typename UnderlyingIterator::Mask; + + /// Parameters object is precomputed state and is host-constructible + class Params { + private: + friend EllPredicatedTileAccessIterator; + + /// Parameters object + typename UnderlyingIterator::Params params_; + + public: + CUTLASS_HOST_DEVICE + Params() {} + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const &layout) + : params_(layout::PitchLinear(layout.stride(0))) {} + }; + + private: + // + // Data members + // + + /// Underlying pitch-linear tile iterator + UnderlyingIterator iterator_; + + public: + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + EllPredicatedTileAccessIterator( + /// Precomputed parameters object + Params const ¶ms, + /// Pointer to start of tensor + Pointer pointer, + /// Extent of tensor + TensorCoord extent, + /// ID of each participating thread + int thread_id, + /// Initial offset of threadblock + TensorCoord const &threadblock_offset) + : iterator_(params.params_, pointer, + layout::PitchLinearCoord(extent.column() * kInterleavedK, + extent.row() / kInterleavedK), + thread_id, + layout::PitchLinearCoord( + threadblock_offset.column() * kInterleavedK, + threadblock_offset.row() / kInterleavedK)) {} + + /// Construct a EllPredicatedTileAccessIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + EllPredicatedTileAccessIterator( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ) + : EllPredicatedTileAccessIterator(params, pointer, extent, thread_id, + make_Coord(0, 0)) {} + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { iterator_.set_iteration_index(index); } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances an iterator along logical dimensions of matrix in units of whole + /// tiles + CUTLASS_HOST_DEVICE + void add_tile_offset(TensorCoord const &tile_offset) { + iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()}); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + return reinterpret_cast(iterator_.get()); + } + + CUTLASS_HOST_DEVICE + int get_k() const { + return iterator_.get_k(); + } + + CUTLASS_HOST_DEVICE + int get_stride() const { + return iterator_.get_stride(); + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + EllPredicatedTileAccessIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. 
+ CUTLASS_HOST_DEVICE + EllPredicatedTileAccessIterator operator++(int) { + EllPredicatedTileAccessIterator self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { iterator_.enable_mask(); } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const &mask) { iterator_.set_mask(mask); } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask &mask) { iterator_.get_mask(mask); } + + /// add mask for small tiles in ELL + CUTLASS_DEVICE + void ell_add_mask(int blocksize) { + iterator_.ell_add_mask(blocksize); + } + + /// Returns whether access is valid or not + CUTLASS_HOST_DEVICE + bool valid() { return iterator_.valid(); } +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace transform +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/ell_predicated_tile_iterator.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/ell_predicated_tile_iterator.h new file mode 100644 index 0000000000000000000000000000000000000000..f9847334bc7df34120a4c93bcb1bfc4bbf9d8d44 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/ell_predicated_tile_iterator.h @@ -0,0 +1,1315 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/*! \file + \brief Ell iterator for Blocked-Ell matrix (ellValue matrix) used with EllMmaPipelined +*/ + +#pragma once + +#include "cutlass/arch/memory.h" +#include "cutlass/transform/threadblock/predicated_tile_access_iterator.h" + +#include "cutlass/transform/threadblock/ell_predicated_tile_access_iterator.h" +#include "cutlass/transform/threadblock/ell_iterator.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace transform { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +/// EllPredicatedTileIterator +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +/// Regular tile iterator using a precomputed control structure to minimize register liveness +/// and integer arithmetic. +/// +/// Layout is assumed to be invariant at the time the precomputed "Params" object is constructed. +/// +/// Base pointer and tensor extents may be specified at the time the iterator is constructed. +/// Subsequently, they are assumed to be immutable. +/// +/// Adding a logical coordinate offset may be performed at the time the iterator is constructed. +/// Subsequent additions to logical coordinate offset may be performed but are relatively expensive. +/// +/// Visitation order is intended to first visit a "residual" tile that may be partially full in +/// both the advance dimension and the steady-state dimension. This is assumed to be the last +/// tile in the iteration sequence. Advancing an iterator that has just been constructed moves to +/// the first tile that is full in the advance dimension and recomputes predicates. Subsequent +/// accesses may be performed without updating internal predicates and are efficient in terms of +/// live register state and pointer arithmetic instructions. +/// +/// To be efficient, this assumes the iterator will be dereferenced and advanced at least once +/// outside any looping structure to minimize integer arithmetic. +/// +/// Acceses out of bounds are safe so long as `clear_mask()` is called prior to dereferencing +/// the iterator. +/// +/// +/// Example: +/// +/// An efficient pipeline structure may be constructed as follows: +/// +// template +// __global__ void kernel( +// typename Iterator::Params params, +// typename Iterator::Element *ptr, +// TensorCoord extent) { +// +// typename Iterator::Fragment fragment; +// +// TensorCoord threadblock_offset(0, 0); +// +// Iterator iter(params, ptr, extent, threadIdx.x, threadblock_offsets); +// +// +// fragment = *iter; // load "residue" tile first +// ++iter; // advance to first "steady state" tile and update internal masks +// +// +// #pragma unroll +// for (int i = Remaining - 1; i >= 0; --i) { +// +// f(fragment); +// +// if (!i) { +// iter.clear_mask(); // light-weight operation to clear masks - subsequent loads become NO-OPs. 
+// } +// +// fragment = *iter; // load tile during "steady state" phase +// ++iter; // advance to next tile - lightweight due to steady-state masks +// } +// } +// +// void host(TensorView view) { +// +// using Iterator = transform::threadblock::EllPredicatedTileIterator; +// +// typename Iterator::Params params(view.layout()); +// +// kernel(params, view.data()); +// } +/// +/// +template < + typename Shape, + typename Element, + typename Layout, + int AdvanceRank, + typename ThreadMap, + int AccessSize = ThreadMap::kElementsPerAccess +> +class EllPredicatedTileIterator; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of EllPredicatedTileIterator for pitch-linear data. +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template +class EllPredicatedTileIterator { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::PitchLinear; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element *; + using NonConstPointer = typename platform::remove_const::type *; + + /// Type used for internal memory accesses + using AccessType = AlignedArray::value / 8)>; + + /// Underlying iterator to compute the addresses + using TileAccessIterator = + EllPredicatedTileAccessIterator; + + static int const kAccessesPerVector = TileAccessIterator::kAccessesPerVector; + + /// Fragment object to be loaded or stored + using Fragment = cutlass::Array; + + /// Predicate vector stores mask to guard accesses + using Mask = typename TileAccessIterator::Mask; + + /// Iterator for ELL storage + using EllIterator = typename cutlass::transform::threadblock::ell::Iterator; + + /// Parameters object is precomputed state and is host-constructible + class Params { + public: + friend EllPredicatedTileIterator; + + private: + /// Parameters object + typename TileAccessIterator::Params params_; + + public: + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const &layout) : params_(layout) { } + + CUTLASS_HOST_DEVICE + Params() { } + }; + + private: + /// Internal pointer type permits fast address arithmetic + using BytePointer = char *; + + private: + // + // Data members + // + + /// Data member to the tile access iterator + TileAccessIterator address_iterator_; + + public: + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + EllPredicatedTileIterator( + /// Precomputed parameters object + Params const ¶ms, + /// Pointer to start of tensor + Pointer pointer, + /// Extent of tensor + TensorCoord extent, + /// ID of each participating thread + int thread_id, + /// Initial offset of threadblock + TensorCoord const &threadblock_offset) + : address_iterator_(params.params_, pointer, extent, thread_id, + threadblock_offset) {} + + /// Construct a EllPredicatedTileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + EllPredicatedTileIterator( + Params 
const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ) + : EllPredicatedTileIterator(params, pointer, extent, thread_id, + make_Coord(0, 0)) {} + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + address_iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + EllPredicatedTileIterator &operator++() { + if (kAdvanceRank) + address_iterator_.add_tile_offset({0, 1}); + else + address_iterator_.add_tile_offset({1, 0}); + + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + EllPredicatedTileIterator operator++(int) { + EllPredicatedTileIterator self(*this); + operator++(); + return self; + } + + /// Returns a stride + CUTLASS_HOST_DEVICE + int get_stride() const { return address_iterator_.get_stride(); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { address_iterator_.clear_mask(enable); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { address_iterator_.enable_mask(); } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const &mask) { address_iterator_.set_mask(mask); } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask &mask) { address_iterator_.get_mask(mask); } + + /// add mask for small tiles in ELL + CUTLASS_HOST_DEVICE + void ell_add_mask(int blocksize) { address_iterator_.ell_add_mask(blocksize); } + + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + load_with_byte_offset(frag, pointer_offset * sizeof_bits::value / 8); + } + + CUTLASS_DEVICE + void load_with_byte_offset(Fragment &frag, LongIndex byte_offset) { + + AccessType *frag_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < kAccessesPerVector; ++v) { + + int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous); + + address_iterator_.set_iteration_index(idx); + char const *byte_ptr = reinterpret_cast(address_iterator_.get()) + byte_offset; + + AccessType const *access_ptr = reinterpret_cast(byte_ptr); + + cutlass::arch::global_load( + frag_ptr[idx], access_ptr, address_iterator_.valid()); + + ++address_iterator_; + } + } + } + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment &frag) { load_with_byte_offset(frag, 0); } + + CUTLASS_DEVICE + void load_with_ell_index(Fragment &frag, EllIterator &ell_iter) { + + AccessType *frag_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { + 
CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < kAccessesPerVector; ++v) { + + int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous); + address_iterator_.set_iteration_index(idx); + LongIndex ell_offset = 0; + + int k_offset = address_iterator_.get_k(); + ell_offset = ell_iter.get_offset(k_offset) * sizeof(Element); + + char const *byte_ptr = reinterpret_cast(address_iterator_.get()) + ell_offset; + + AccessType const *access_ptr = reinterpret_cast(byte_ptr); + + bool is_valid = address_iterator_.valid(); + is_valid = is_valid && (ell_offset >= 0); + + cutlass::arch::global_load( + frag_ptr[idx], access_ptr, is_valid); + + ++address_iterator_; + } + } + } + } + + CUTLASS_DEVICE + void load_with_ell_index_fast(Fragment &frag, EllIterator &ell_iter) { + + LongIndex ell_offset = ell_iter.get_offset_fast() * sizeof(Element); + + AccessType *frag_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < kAccessesPerVector; ++v) { + + int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous); + + address_iterator_.set_iteration_index(idx); + char const *byte_ptr = reinterpret_cast(address_iterator_.get()) + ell_offset; + + AccessType const *access_ptr = reinterpret_cast(byte_ptr); + + bool is_valid = address_iterator_.valid(); + is_valid = is_valid && (ell_offset >= 0); + + cutlass::arch::global_load( + frag_ptr[idx], access_ptr, is_valid); + + ++address_iterator_; + } + } + } + } + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { + store_with_byte_offset(frag, pointer_offset * sizeof_bits::value / 8); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_byte_offset(Fragment const &frag, LongIndex byte_offset) { + address_iterator_.set_iteration_index(0); + AccessType const *frag_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < kAccessesPerVector; ++v) { + + int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous); + + char *byte_ptr = reinterpret_cast(address_iterator_.get()) + byte_offset; + AccessType *access_ptr = reinterpret_cast(byte_ptr); + + if (address_iterator_.valid()) { + *access_ptr = frag_ptr[idx]; + } + ++address_iterator_; + } + } + } + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const &frag) { store_with_byte_offset(frag, 0); } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of EllPredicatedTileIterator for pitch-linear data. 
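+/// This specialization adapts column-major data to the pitch-linear iterator above: the
+/// (row, column) extent and threadblock offset are forwarded to the underlying iterator
+/// as (contiguous, strided) coordinates with rows as the contiguous dimension, and all
+/// load, store, and mask operations simply delegate to that iterator.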
+/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template < + typename Shape_, + typename Element_, + int AdvanceRank, + typename ThreadMap_, + int AccessSize +> +class EllPredicatedTileIterator { +public: + + static_assert(AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::ColumnMajor; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element *; + using NonConstPointer = typename platform::remove_const::type *; + + using UnderlyingIterator = EllPredicatedTileIterator< + layout::PitchLinearShape, + Element, + layout::PitchLinear, + (kAdvanceRank == 0 ? 0 : 1), + ThreadMap, + AccessSize + >; + + using AccessType = typename UnderlyingIterator::AccessType; + + /// Fragment object to be loaded or stored + using Fragment = cutlass::Array; + + /// Predicate vector stores mask to guard accesses + using Mask = typename UnderlyingIterator::Mask; + + /// Iterator for ELL storage + using EllIterator = typename cutlass::transform::threadblock::ell::Iterator; + + /// Parameters object is precomputed state and is host-constructible + class Params { + private: + + friend EllPredicatedTileIterator; + + /// Parameters object + typename UnderlyingIterator::Params params_; + + public: + + CUTLASS_HOST_DEVICE + Params() { } + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const &layout): params_(layout::PitchLinear(layout.stride(0))) { + + } + }; + + +private: + + // + // Data members + // + + /// Underlying pitch-linear tile iterator + UnderlyingIterator iterator_; + +public: + + /// Constructs a TileIterator from its precomputed state, threadblock offset, and thread ID + CUTLASS_HOST_DEVICE + EllPredicatedTileIterator( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id, ///< ID of each participating thread + TensorCoord const &threadblock_offset ///< Initial offset of threadblock + ): + iterator_( + params.params_, + pointer, + layout::PitchLinearCoord(extent.row(), extent.column()), + thread_id, + layout::PitchLinearCoord(threadblock_offset.row(), threadblock_offset.column()) + ) { } + + /// Construct a EllPredicatedTileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + EllPredicatedTileIterator( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ): EllPredicatedTileIterator(params, pointer, extent, thread_id, make_Coord(0, 0)) { } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances to the next tile in memory. 
+ /// + /// The first time this method is called, predicates are updated, and the iterator's + /// internal pointer is reverted to the first "steady state" tile. Subsequent calls + /// are lightweight and must only update the internal pointer. + CUTLASS_HOST_DEVICE + EllPredicatedTileIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the iterator's + /// internal pointer is reverted to the first "steady state" tile. Subsequent calls + /// are lightweight and must only update the internal pointer. + CUTLASS_HOST_DEVICE + EllPredicatedTileIterator operator++(int) { + EllPredicatedTileIterator self(*this); + operator++(); + return self; + } + + /// Returns a stride + CUTLASS_HOST_DEVICE + int get_stride() const { return iterator_.get_stride(); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { + iterator_.clear_mask(enable); + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { + iterator_.enable_mask(); + } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const &mask) { + iterator_.set_mask(mask); + } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask &mask) { + iterator_.get_mask(mask); + } + + /// add mask for small tiles in ELL + CUTLASS_HOST_DEVICE + void ell_add_mask(int blocksize) { + iterator_.ell_add_mask(blocksize); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_byte_offset(Fragment &frag, LongIndex byte_offset) { + iterator_.load_with_byte_offset(frag, byte_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment &frag) { + load_with_pointer_offset(frag, 0); + } + + CUTLASS_DEVICE + void load_with_ell_index(Fragment &frag, EllIterator& ell_iter) { + iterator_.load_with_ell_index(frag, ell_iter); + } + + CUTLASS_DEVICE + void load_with_ell_index_fast(Fragment &frag, EllIterator& ell_iter) { + iterator_.load_with_ell_index_fast(frag, ell_iter); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { + iterator_.store_with_pointer_offset(frag, pointer_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_byte_offset(Fragment const &frag, LongIndex byte_offset) { + iterator_.store_with_byte_offset(frag, byte_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const &frag) { + store_with_pointer_offset(frag, 0); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of EllPredicatedTileIterator for pitch-linear data. 
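+/// This specialization handles row-major data with the mapping transposed relative to
+/// the column-major case: the (row, column) extent and threadblock offset are forwarded
+/// to the underlying pitch-linear iterator as (column, row), so columns become the
+/// contiguous dimension; everything else delegates unchanged.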
+/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template < + typename Shape_, + typename Element_, + int AdvanceRank, + typename ThreadMap_, + int AccessSize +> +class EllPredicatedTileIterator { +public: + + static_assert(AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::RowMajor; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element *; + using NonConstPointer = typename platform::remove_const::type *; + + using UnderlyingIterator = EllPredicatedTileIterator< + layout::PitchLinearShape, + Element, + layout::PitchLinear, + (kAdvanceRank == 0 ? 1 : 0), + ThreadMap, + AccessSize + >; + + using AccessType = typename UnderlyingIterator::AccessType; + + /// Fragment object to be loaded or stored + using Fragment = cutlass::Array; + + /// Predicate vector stores mask to guard accesses + using Mask = typename UnderlyingIterator::Mask; + + /// Iterator for ELL storage + using EllIterator = typename cutlass::transform::threadblock::ell::Iterator; + + /// Parameters object is precomputed state and is host-constructible + class Params { + private: + + friend EllPredicatedTileIterator; + + /// Parameters object + typename UnderlyingIterator::Params params_; + + public: + + CUTLASS_HOST_DEVICE + Params() { } + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const &layout): params_(layout::PitchLinear(layout.stride(0))) { + + }; + }; + + +private: + + // + // Data members + // + + /// Underlying pitch-linear tile iterator + UnderlyingIterator iterator_; + +public: + + /// Constructs a TileIterator from its precomputed state, threadblock offset, and thread ID + CUTLASS_HOST_DEVICE + EllPredicatedTileIterator( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id, ///< ID of each participating thread + TensorCoord const &threadblock_offset ///< Initial offset of threadblock + ): + iterator_( + params.params_, + pointer, + layout::PitchLinearCoord(extent.column(), extent.row()), + thread_id, + layout::PitchLinearCoord(threadblock_offset.column(), threadblock_offset.row()) + ) { } + + /// Construct a EllPredicatedTileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + EllPredicatedTileIterator( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ): EllPredicatedTileIterator(params, pointer, extent, thread_id, make_Coord(0, 0)) { } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances to the next tile in memory. 
+ /// + /// The first time this method is called, predicates are updated, and the iterator's + /// internal pointer is reverted to the first "steady state" tile. Subsequent calls + /// are lightweight and must only update the internal pointer. + CUTLASS_HOST_DEVICE + EllPredicatedTileIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the iterator's + /// internal pointer is reverted to the first "steady state" tile. Subsequent calls + /// are lightweight and must only update the internal pointer. + CUTLASS_HOST_DEVICE + EllPredicatedTileIterator operator++(int) { + EllPredicatedTileIterator self(*this); + operator++(); + return self; + } + + /// Returns a stride + CUTLASS_HOST_DEVICE + int get_stride() const { return iterator_.get_stride(); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { + iterator_.clear_mask(enable); + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { + iterator_.enable_mask(); + } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const &mask) { + iterator_.set_mask(mask); + } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask &mask) { + iterator_.get_mask(mask); + } + + /// add mask for small tiles in ELL + CUTLASS_HOST_DEVICE + void ell_add_mask(int blocksize) { + iterator_.ell_add_mask(blocksize); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_byte_offset(Fragment &frag, LongIndex byte_offset) { + iterator_.load_with_byte_offset(frag, byte_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment &frag) { + load_with_pointer_offset(frag, 0); + } + + CUTLASS_DEVICE + void load_with_ell_index(Fragment &frag, EllIterator& ell_iter) { + iterator_.load_with_ell_index(frag, ell_iter); + } + + CUTLASS_DEVICE + void load_with_ell_index_fast(Fragment &frag, EllIterator& ell_iter) { + iterator_.load_with_ell_index_fast(frag, ell_iter); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { + iterator_.store_with_pointer_offset(frag, pointer_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_byte_offset(Fragment const &frag, LongIndex byte_offset) { + iterator_.store_with_byte_offset(frag, byte_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const &frag) { + store_with_pointer_offset(frag, 0); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of EllPredicatedTileIterator for interleaved data. It is mapped +/// to the congruous layout. 
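+/// The interleaved layout is folded into a pitch-linear view: an extent of
+/// (rows, columns) is presented to the underlying iterator as
+/// (rows * InterleavedK, columns / InterleavedK), and the threadblock offset is rescaled
+/// the same way. For example, with InterleavedK = 32 a 128 x 64 extent becomes a
+/// 4096 x 2 pitch-linear extent. A hypothetical instantiation (the thread-map type is
+/// illustrative, not part of this header) might read:
+///
+///   using Iterator = cutlass::transform::threadblock::EllPredicatedTileIterator<
+///       cutlass::MatrixShape<128, 64>, int8_t,
+///       cutlass::layout::ColumnMajorInterleaved<32>, 1, ThreadMap>;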
+/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// + +template +class EllPredicatedTileIterator, + AdvanceRank, ThreadMap_, AccessSize> { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + static int const kInterleavedK = InterleavedK; + using Layout = layout::ColumnMajorInterleaved; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element *; + using NonConstPointer = typename platform::remove_const::type *; + + using UnderlyingIterator = EllPredicatedTileIterator< + layout::PitchLinearShape, + Element, layout::PitchLinear, (kAdvanceRank == 0 ? 0 : 1), ThreadMap, AccessSize>; + + + using AccessType = typename UnderlyingIterator::AccessType; + + /// Fragment object to be loaded or stored + using Fragment = cutlass::Array; + + /// Predicate vector stores mask to guard accesses + using Mask = typename UnderlyingIterator::Mask; + + /// Iterator for ELL storage + using EllIterator = typename cutlass::transform::threadblock::ell::Iterator; + + /// Parameters object is precomputed state and is host-constructible + class Params { + private: + friend EllPredicatedTileIterator; + + /// Parameters object + typename UnderlyingIterator::Params params_; + + public: + CUTLASS_HOST_DEVICE + Params() {} + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const &layout) + : params_(layout::PitchLinear(layout.stride(0))) {} + }; + + private: + // + // Data members + // + + /// Underlying pitch-linear tile iterator + UnderlyingIterator iterator_; + + public: + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + EllPredicatedTileIterator( + /// Precomputed parameters object + Params const ¶ms, + /// Pointer to start of tensor + Pointer pointer, + /// Extent of tensor + TensorCoord extent, + /// ID of each participating thread + int thread_id, + /// Initial offset of threadblock + TensorCoord const &threadblock_offset) + : iterator_(params.params_, pointer, + layout::PitchLinearCoord(extent.row() * kInterleavedK, + extent.column() / kInterleavedK), + thread_id, + layout::PitchLinearCoord( + threadblock_offset.row() * kInterleavedK, + threadblock_offset.column() / kInterleavedK)) {} + + /// Construct a EllPredicatedTileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + EllPredicatedTileIterator( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ) + : EllPredicatedTileIterator(params, pointer, extent, thread_id, + make_Coord(0, 0)) {} + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances to the next tile in memory. 
+ /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + EllPredicatedTileIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + EllPredicatedTileIterator operator++(int) { + EllPredicatedTileIterator self(*this); + operator++(); + return self; + } + + /// Returns a stride + CUTLASS_HOST_DEVICE + int get_stride() const { return iterator_.get_stride(); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { iterator_.enable_mask(); } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const &mask) { iterator_.set_mask(mask); } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask &mask) { iterator_.get_mask(mask); } + + /// add mask for small tiles in ELL + CUTLASS_HOST_DEVICE + void ell_add_mask(int blocksize) { iterator_.ell_add_mask(blocksize); } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + CUTLASS_DEVICE + void load_with_ell_index(Fragment &frag, EllIterator& ell_iter) { + iterator_.load_with_ell_index(frag, ell_iter); + } + + CUTLASS_DEVICE + void load_with_ell_index_fast(Fragment &frag, EllIterator& ell_iter) { + iterator_.load_with_ell_index_fast(frag, ell_iter); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment &frag) { load_with_pointer_offset(frag, 0); } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { + iterator_.store_with_pointer_offset(frag, pointer_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of EllPredicatedTileIterator for interleaved-32 data. It is +/// mapped to the congruous layout. 
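+/// This mirrors the column-major interleaved case with rows and columns swapped: an
+/// extent of (rows, columns) maps to a pitch-linear extent of
+/// (columns * InterleavedK, rows / InterleavedK) for the underlying iterator.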
+/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template +class EllPredicatedTileIterator, + AdvanceRank, ThreadMap_, AccessSize> { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + static int const kInterleavedK = InterleavedK; + using Layout = layout::RowMajorInterleaved; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element *; + using NonConstPointer = typename platform::remove_const::type *; + + using UnderlyingIterator = EllPredicatedTileIterator< + layout::PitchLinearShape, + Element, layout::PitchLinear, (kAdvanceRank == 0 ? 1 : 0), ThreadMap, AccessSize>; + + + using AccessType = typename UnderlyingIterator::AccessType; + + /// Fragment object to be loaded or stored + using Fragment = cutlass::Array; + + /// Predicate vector stores mask to guard accesses + using Mask = typename UnderlyingIterator::Mask; + + /// Parameters object is precomputed state and is host-constructible + class Params { + private: + friend EllPredicatedTileIterator; + + /// Parameters object + typename UnderlyingIterator::Params params_; + + public: + CUTLASS_HOST_DEVICE + Params() {} + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const &layout) + : params_(layout::PitchLinear(layout.stride(0))) {} + }; + + private: + // + // Data members + // + + /// Underlying pitch-linear tile iterator + UnderlyingIterator iterator_; + + public: + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + EllPredicatedTileIterator( + /// Precomputed parameters object + Params const ¶ms, + /// Pointer to start of tensor + Pointer pointer, + /// Extent of tensor + TensorCoord extent, + /// ID of each participating thread + int thread_id, + /// Initial offset of threadblock + TensorCoord const &threadblock_offset) + : iterator_(params.params_, pointer, + layout::PitchLinearCoord(extent.column() * kInterleavedK, + extent.row() / kInterleavedK), + thread_id, + layout::PitchLinearCoord( + threadblock_offset.column() * kInterleavedK, + threadblock_offset.row() / kInterleavedK)) {} + + /// Construct a EllPredicatedTileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + EllPredicatedTileIterator( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ) + : EllPredicatedTileIterator(params, pointer, extent, thread_id, + make_Coord(0, 0)) {} + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. 
+ /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + EllPredicatedTileIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + EllPredicatedTileIterator operator++(int) { + EllPredicatedTileIterator self(*this); + operator++(); + return self; + } + + /// Returns a stride + CUTLASS_HOST_DEVICE + int get_stride() const { return iterator_.get_stride(); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { iterator_.enable_mask(); } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const &mask) { iterator_.set_mask(mask); } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask &mask) { iterator_.get_mask(mask); } + + /// add mask for small tiles in ELL + CUTLASS_HOST_DEVICE + void ell_add_mask(int blocksize) { iterator_.ell_add_mask(blocksize); } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment &frag) { load_with_pointer_offset(frag, 0); } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { + iterator_.store_with_pointer_offset(frag, pointer_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace transform +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/predicated_scale_bias_vector_access_iterator.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/predicated_scale_bias_vector_access_iterator.h new file mode 100644 index 0000000000000000000000000000000000000000..61bed18aef58e3cda99a58dcb4b0fda37a5f1c5f --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/predicated_scale_bias_vector_access_iterator.h @@ -0,0 +1,375 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief Templates calculating the address and predicates to the load of scale and bias vectors. + + This iterator uses masks to guard out-of-bounds accesses. + + It can be used to load the gamma and beta vectors of layernorm which is loop variant. + + A precomputed "Params" object minimizes the amount of state that must be + stored in registers, and integer addition is used to advance the pointer + through memory. +*/ + +#pragma once + +#include "cutlass/array.h" +#include "cutlass/coord.h" +#include "cutlass/cutlass.h" +#include "cutlass/layout/matrix.h" +#include "cutlass/layout/pitch_linear.h" +#include "cutlass/matrix_shape.h" +#include "cutlass/predicate_vector.h" +#include "cutlass/tensor_ref.h" +#include "cutlass/tensor_view.h" +#include "cutlass/conv/threadblock/conv2d_params.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace transform { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +/// PredicatedScaleBiasVectorAccessIterator +/// +template +class PredicatedScaleBiasVectorAccessIterator; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileAccessIterator for fprop pitch-linear data. 
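+/// The first kThreads threads of a threadblock are assigned to the scale vector and the
+/// next kThreads threads to the bias vector; each issues a single 128-bit access, so
+/// kElementsPerAccess = 128 / sizeof_bits<Element>::value and
+/// kThreads = ThreadblockShape::kContiguous / kElementsPerAccess. As an illustration
+/// only, for a 16-bit element type with ThreadblockShape::kContiguous = 64, each thread
+/// covers 8 elements, threads 0-7 read scale, and threads 8-15 read bias; the guard
+/// predicate masks threads whose accesses fall outside the residue tile and, once the
+/// iterator advances, any threads at or beyond 2 * kThreads.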
+/// +template +class PredicatedScaleBiasVectorAccessIterator { + public: + + using ThreadblockShape = ThreadblockShape_; + using Element = Element_; + using Layout = layout::PitchLinear; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using ConstPointer = const Element *; + using NonConstPointer = typename platform::remove_const::type *; + + static int const kElementsPerAccess = 128 / sizeof_bits::value; + static int const kThreads = ThreadblockShape::kContiguous / kElementsPerAccess; + + using AccessType = AlignedArray; + + private: + /// Internal pointer type permits fast address arithmetic + using BytePointer = char *; + + private: + // + // Data members + // + + /// Internal pointer to first access of tile + BytePointer pointer_; + + TensorCoord thread_offset_; + + int problem_size_k_; + + /// Used for out-of-order visitation + bool is_residue_tile_; + + bool guard_; + + TensorCoord::Index residue_size_; + + public: + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + PredicatedScaleBiasVectorAccessIterator( + /// Extent of tensor + int problem_size_k, + /// Pointer to the start of the scale vector + ConstPointer scale_pointer, + /// Pointer to the start of the bias vector + ConstPointer bias_pointer, + /// ID of each participating thread + int thread_id, + /// Initial offset of threadblock + TensorCoord const &threadblock_offset) { + pointer_ = (thread_id < kThreads) + ? reinterpret_cast( + const_cast(scale_pointer)) + : reinterpret_cast( + const_cast(bias_pointer)); + + // Per-thread offset in logical coordinates of tensor + int thread_base = (thread_id < kThreads) ? 0 : kThreads; + + problem_size_k_ = problem_size_k; + + is_residue_tile_ = true; + + residue_size_ = (problem_size_k_ - threadblock_offset.contiguous()) % ThreadblockShape::kContiguous; + + if (residue_size_ == 0) { + residue_size_ = ThreadblockShape::kContiguous; + } + + guard_ = ((thread_id - thread_base) * kElementsPerAccess) < residue_size_; + + thread_offset_ = + threadblock_offset + + TensorCoord((thread_id - thread_base) * kElementsPerAccess, 0); + + set_iteration_index(0); + } + + /// Construct a PredicatedTileAccessIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + PredicatedScaleBiasVectorAccessIterator( + /// Extent of tensor + int problem_size_k, + /// Pointer to start of scale vector + ConstPointer scale_pointer, + /// Pointer to start of scale vector + ConstPointer bias_pointer, + ///< ID of each participating thread + int thread_id) + : PredicatedScaleBiasVectorAccessIterator(problem_size_k, + scale_pointer, bias_pointer, + thread_id, make_Coord(0, 0)) {} + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) {} + + /// Advances an iterator along logical dimensions of matrix in units of whole threadblock tiles + CUTLASS_DEVICE + void add_tile_offset( + TensorCoord const &tile_offset) { + + guard_ = threadIdx.x < kThreads * 2; + + TensorCoord offset = is_residue_tile_ ? 
+ TensorCoord(residue_size_ + ThreadblockShape::kContiguous * (tile_offset.contiguous() - 1), 0) + : TensorCoord(ThreadblockShape::kContiguous * tile_offset.contiguous(), 0); + + thread_offset_ = + thread_offset_ + + offset; + + is_residue_tile_ = false; + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + + return reinterpret_cast( + pointer_ + + (thread_offset_.contiguous() * sizeof_bits::value / 8)); + } + + /// Increment and return an instance to self. + CUTLASS_HOST_DEVICE + PredicatedScaleBiasVectorAccessIterator &operator++() { + return *this; + } + + /// Increment and return an instance to self. + CUTLASS_DEVICE + PredicatedScaleBiasVectorAccessIterator operator++(int) { + PredicatedScaleBiasVectorAccessIterator self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { + guard_ &= (!enable); + } + + /// Returns whether access is valid or not + CUTLASS_HOST_DEVICE + bool valid() { + return guard_; + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileAccessIterator for row-major data. +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template +class PredicatedScaleBiasVectorAccessIterator { + public: + + using ThreadblockShape = ThreadblockShape_; + using Element = Element_; + using Layout = layout::RowMajor; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using ConstPointer = const Element *; + using NonConstPointer = typename platform::remove_const::type *; + + using UnderlyingIterator = PredicatedScaleBiasVectorAccessIterator< + layout::PitchLinearShape, + Element, + layout::PitchLinear>; + + using AccessType = typename UnderlyingIterator::AccessType; + static int const kElementsPerAccess = UnderlyingIterator::kElementsPerAccess; + + private: + // + // Data members + // + + /// Underlying pitch-linear tile iterator + UnderlyingIterator iterator_; + + public: + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + PredicatedScaleBiasVectorAccessIterator( + ///< Extent of tensor + int problem_size_k, + ///< Pointer to the start of the scale vector + ConstPointer scale_pointer, + ///< Pointer to the start of the bias vector + ConstPointer bias_pointer, + ///< ID of each participating thread + int thread_id, + ///< Initial offset of threadblock + TensorCoord const &threadblock_offset) + : iterator_(problem_size_k, scale_pointer, bias_pointer, + thread_id, + layout::PitchLinearCoord(threadblock_offset.column(), + threadblock_offset.row())) {} + + /// Construct a PredicatedTileAccessIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + PredicatedScaleBiasVectorAccessIterator( + int problem_size_k, ///< Extent of tensor + ConstPointer scale_pointer, ///< Pointer to the start of the scale vector + ConstPointer bias_pointer, ///< Pointer to the start of the bias vector + int thread_id ///< ID of each participating thread + ) + : PredicatedScaleBiasVectorAccessIterator(problem_size_k, + scale_pointer, bias_pointer, + thread_id, make_Coord(0, 0)) {} + + /// Advances an iterator along logical dimensions of matrix in units of whole 
+ /// threadblock tiles + CUTLASS_HOST_DEVICE + void add_tile_offset(TensorCoord const &tile_offset) { + iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()}); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + return reinterpret_cast(iterator_.get()); + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedScaleBiasVectorAccessIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedScaleBiasVectorAccessIterator operator++(int) { + PredicatedScaleBiasVectorAccessIterator self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { + iterator_.clear_mask(enable); + } + + /// Returns whether access is valid or not + CUTLASS_HOST_DEVICE + bool valid() { + return iterator_.valid(); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace transform +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/predicated_scale_bias_vector_iterator.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/predicated_scale_bias_vector_iterator.h new file mode 100644 index 0000000000000000000000000000000000000000..fb08930eaa19b9446a5b84501b210c778233ffb5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/predicated_scale_bias_vector_iterator.h @@ -0,0 +1,328 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief Templates calculating the address and predicates to the load of scale and bias vectors. + + This iterator uses masks to guard out-of-bounds accesses. + + This can be used to load var and mean vectors in layernorm which is loop invariant. + + A precomputed "Params" object minimizes the amount of state that must be + stored in registers, and integer addition is used to advance the pointer + through memory. +*/ + +#pragma once + +#include "cutlass/array.h" +#include "cutlass/coord.h" +#include "cutlass/cutlass.h" +#include "cutlass/layout/matrix.h" +#include "cutlass/layout/pitch_linear.h" +#include "cutlass/matrix_shape.h" +#include "cutlass/predicate_vector.h" +#include "cutlass/tensor_ref.h" +#include "cutlass/tensor_view.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace transform { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +/// PredicatedScaleBiasVectorIterator +/// +template +class PredicatedScaleBiasVectorIterator; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileIterator for wgrad pitch-linear data. 
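// [Editorial sketch, not part of the vendored CUTLASS header.] The wgrad
// specialization that follows guards every scale/bias element load with a
// bounds check against problem_size and substitutes zero for out-of-range
// lanes. A minimal host-side analogue of that predicated-load pattern, with
// hypothetical names (guarded_load, load_scale_fragment) chosen only for
// illustration:
#include <cstddef>

template <typename T>
T guarded_load(T const *ptr, std::ptrdiff_t offset, std::ptrdiff_t extent) {
  // Predicate: only dereference when the logical coordinate is in bounds;
  // otherwise return the fill value (zero), as the masked global_load calls
  // in load_with_pointer_offset() do on the device.
  return (offset < extent) ? ptr[offset] : T(0);
}

// Usage sketch: each of kIterations loads one element eight columns apart,
// mirroring 'scale_pointer_ + thread_offset_ + c * 8' in the device code.
template <typename T, int kIterations>
void load_scale_fragment(T const *scale, std::ptrdiff_t thread_offset,
                         std::ptrdiff_t problem_size, T (&frag)[kIterations]) {
  for (int c = 0; c < kIterations; ++c) {
    frag[c] = guarded_load(scale, thread_offset + c * 8, problem_size);
  }
}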
+/// +template +class PredicatedScaleBiasVectorIterator { + public: + + using WarpShape = WarpShape_; + using Element = Element_; + using Layout = layout::PitchLinear; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using ConstPointer = const Element *; + using NonConstPointer = typename platform::remove_const::type *; + + static int const kElementsPerAccess = 1; + + using AccessType = AlignedArray; + + static int const kIterations = WarpShape::kContiguous / 8; + + /// Fragment object to be loaded or stored + using Fragment = cutlass::Array<__half2, 2 * kIterations * kElementsPerAccess>; + + private: + // + // Data members + // + + /// Internal pointer to first access of tile + ConstPointer scale_pointer_; + ConstPointer bias_pointer_; + + /// Size of tensor + int problem_size_; + + int32_t thread_offset_; + + public: + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + PredicatedScaleBiasVectorIterator( + /// Extent of tensor + int problem_size, + /// Pointer to the start of the scale vector + ConstPointer scale_pointer, + /// Pointer to the start of the bias vector + ConstPointer bias_pointer, + /// ID of each participating thread + int thread_id, + /// Initial offset of threadblock + TensorCoord const &threadblock_offset) + : problem_size_(problem_size), + scale_pointer_(scale_pointer), + bias_pointer_(bias_pointer) { + + thread_offset_ = threadblock_offset.contiguous() + (thread_id % 32) / 4; + } + + /// Construct a PredicatedTileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + PredicatedScaleBiasVectorIterator( + /// Extent of tensor + int problem_size, + /// Pointer to start of scale vector + ConstPointer scale_pointer, + /// Pointer to start of scale vector + ConstPointer bias_pointer, + ///< ID of each participating thread + int thread_id) + : PredicatedScaleBiasVectorIterator(problem_size, + scale_pointer, bias_pointer, + thread_id, make_Coord(0, 0)) {} + + /// Advances an iterator along logical dimensions of matrix in units of whole warp tiles + CUTLASS_DEVICE + void add_tile_offset( + TensorCoord const &tile_offset) { + + thread_offset_ += (WarpShape::kContiguous * tile_offset.contiguous()); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + + frag.fill(__float2half2_rn(0.0f)); + __half2 *frag_ptr = reinterpret_cast<__half2 *>(&frag); + + // load scale + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < kIterations; ++c) { + + cutlass::arch::global_load< + __half, + sizeof(AccessType) + >( + frag_ptr[c * 2].x, + scale_pointer_ + thread_offset_ + c * 8, + (thread_offset_ + c * 8) < problem_size_ + ); + } + + // load bias + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < kIterations; ++c) { + + cutlass::arch::global_load< + __half, + sizeof(AccessType) + >( + frag_ptr[c * 2 + 1].x, + bias_pointer_ + thread_offset_ + c * 8, + (thread_offset_ + c * 8) < problem_size_ + ); + } + + // duplicate scale + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < kIterations; ++c) { + frag_ptr[c * 2].y = frag_ptr[c * 2].x; + } + + // duplicate bias + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < kIterations; ++c) { + frag_ptr[c * 2 + 1].y = frag_ptr[c * 2 + 1].x; + } + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment &frag) { + load_with_pointer_offset(frag, 0); + 
} +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileIterator for row-major data. +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template +class PredicatedScaleBiasVectorIterator { + public: + + using WarpShape = WarpShape_; + using Element = Element_; + using Layout = layout::RowMajor; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using ConstPointer = const Element *; + using NonConstPointer = typename platform::remove_const::type *; + + using UnderlyingIterator = PredicatedScaleBiasVectorIterator< + layout::PitchLinearShape, + Element, + layout::PitchLinear>; + + using AccessType = typename UnderlyingIterator::AccessType; + static int const kElementsPerAccess = UnderlyingIterator::kElementsPerAccess; + using Fragment = typename UnderlyingIterator::Fragment; + + + private: + // + // Data members + // + + /// Underlying pitch-linear tile iterator + UnderlyingIterator iterator_; + + public: + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + PredicatedScaleBiasVectorIterator( + ///< Extent of tensor + int problem_size, + ///< Pointer to the start of the scale vector + ConstPointer scale_pointer, + ///< Pointer to the start of the bias vector + ConstPointer bias_pointer, + ///< ID of each participating thread + int thread_id, + ///< Initial offset of threadblock + TensorCoord const &threadblock_offset) + : iterator_(problem_size, scale_pointer, bias_pointer, + thread_id, + layout::PitchLinearCoord(threadblock_offset.column(), + threadblock_offset.row())) {} + + /// Construct a PredicatedTileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + PredicatedScaleBiasVectorIterator( + int problem_size, ///< Extent of tensor + ConstPointer scale_pointer, ///< Pointer to the start of the scale vector + ConstPointer bias_pointer, ///< Pointer to the start of the bias vector + int thread_id ///< ID of each participating thread + ) + : PredicatedScaleBiasVectorIterator(problem_size, + scale_pointer, bias_pointer, + thread_id, make_Coord(0, 0)) {} + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { iterator_.set_iteration_index(index); } + + /// Advances an iterator along logical dimensions of matrix in units of whole + /// threadblock tiles + CUTLASS_HOST_DEVICE + void add_tile_offset(TensorCoord const &tile_offset) { + iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()}); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment &frag) { + iterator_.load(frag); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace transform +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/predicated_tile_access_iterator.h 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/predicated_tile_access_iterator.h new file mode 100644 index 0000000000000000000000000000000000000000..25454197ca3e6d97882a3e437f89ff3c0366f93c --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/predicated_tile_access_iterator.h @@ -0,0 +1,2118 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Templates calculating the address and predicates to the load of tiles + from pitch-linear rank=2 tensors. + + This iterator uses masks to guard out-of-bounds accesses. The first tile this + iterator visits maybe partial, then the remaining tiles are complete. So, we + only need to compute the predicates twice, once before the first tile and + once for the remaining full tiles which can share the same predicates. + + A precomputed "Params" object minimizes the amount of state that must be + stored in registers, and integer addition is used to advance the pointer + through memory. 
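  Illustrative example (editorial addition, not part of the upstream file): when
  the iterator advances along the strided rank, the size of the partial
  "residue" tile is

      residue = (extent_strided - threadblock_offset_strided) % Shape::kStrided;
      if (residue == 0) residue = Shape::kStrided;

  For example, extent 70, threadblock offset 0, and Shape::kStrided = 32 give
  residue = 6: the first iteration covers strided coordinates [0, 6) with fully
  computed predicates, and every later tile covers a full 32 rows, so the
  remaining iterations can reuse a single steady-state predicate mask.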
+*/ + +#pragma once + +#include "cutlass/array.h" +#include "cutlass/coord.h" +#include "cutlass/cutlass.h" +#include "cutlass/layout/matrix.h" +#include "cutlass/layout/permute.h" +#include "cutlass/layout/pitch_linear.h" +#include "cutlass/matrix_shape.h" +#include "cutlass/predicate_vector.h" +#include "cutlass/tensor_ref.h" +#include "cutlass/tensor_view.h" +#include "cutlass/transform/threadblock/predicated_tile_access_iterator_params.h" + +//////////////////////////////////////////////////////////////////////////////// + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace transform { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +/// PredicatedTileAccessIteratorPredicates +/// +template +class PredicatedTileAccessIteratorPredicates { + public: + using Shape = Shape_; + using Element = Element_; + using Layout = Layout_; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + using AccessType = AccessType_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorCoord = typename Layout::TensorCoord; + + static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements; + + static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements), + "Vectors implied by the thread map must be divisible by the access type."); + + static int const kPredicatesPerByte = 4; + static int const kPredicatesPerWord = 4 * kPredicatesPerByte; + + static int const kPredicateCount = ThreadMap::Iterations::kCount * kAccessesPerVector; + + /// Number of 32b words containing predicates + static int const kPredicateByteCount = + (kPredicateCount + kPredicatesPerByte - 1) / kPredicatesPerByte; + static int const kPredicateWordCount = (kPredicateByteCount + 3) / 4; + + static unsigned const kPredicateMask = (1u << kPredicatesPerByte) - 1u; + + static_assert(kPredicateWordCount <= 4, "Too many predicates."); + + /// Predicate vector stores mask to guard accesses + using Mask = Array; + +// private: + /// Guard predicates + uint32_t predicates_[kPredicateWordCount]; + + /// Size of tensor + TensorCoord extent_; + + /// Initial offset for each thread + TensorCoord thread_offset_; + + /// Offset to the first steady-state tile + TensorCoord residue_offset_; + + /// Iteration along vectors implied by the thread map + int iteration_vector_; + + /// Iteration in the contiguous dimension + int iteration_contiguous_; + + /// Iteration in the strided dimension + int iteration_strided_; + + public: + /// Computes predicates based on internally tracked per-thread offset. 
+ CUTLASS_DEVICE + void compute_predicates_( + /// Extent of the matrix window + TensorCoord extent, + /// optionally, simplify predicate calculation during 'steady state' phase + bool is_steady_state = false) { + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kPredicateWordCount; ++i) { + predicates_[i] = 0u; + } + + CUTLASS_PRAGMA_UNROLL + for (int access_idx = 0; access_idx < ThreadMap::Iterations::kCount * kAccessesPerVector; ++access_idx) { + + int s = access_idx / (ThreadMap::Iterations::kContiguous * kAccessesPerVector); + + int access_residual = access_idx % (ThreadMap::Iterations::kContiguous * kAccessesPerVector); + + int c = access_residual / kAccessesPerVector; + int v = access_residual % kAccessesPerVector; + + TensorCoord iteration_coord(c * ThreadMap::Delta::kContiguous + v * AccessType::kElements, + s * ThreadMap::Delta::kStrided); + + TensorCoord coord = thread_offset_ + iteration_coord; + + bool guard; + + if (is_steady_state) { + if (kAdvanceRank == 0) { + guard = (coord.strided() < extent.strided()); + } else { + guard = (coord.contiguous() < extent.contiguous()); + } + } else { + guard = (coord.strided() < extent.strided() && + coord.contiguous() < extent.contiguous()); + } + + int pred_idx = v + kAccessesPerVector * (c + ThreadMap::Iterations::kContiguous * s); + + int word_idx = pred_idx / kPredicatesPerWord; + int residual = pred_idx % kPredicatesPerWord; + int byte_idx = residual / kPredicatesPerByte; + int bit_idx = residual % kPredicatesPerByte; + + predicates_[word_idx] |= (unsigned(guard) << (byte_idx * 8 + bit_idx)); + + } + + } + + CUTLASS_HOST_DEVICE + void set_predicates(int thread_id, TensorCoord const &threadblock_offset) { + + TensorCoord residue_extent; + if (kAdvanceRank) { + + typename TensorCoord::Index residue_size = (extent_[kAdvanceRank] - threadblock_offset.strided()) % Shape::kStrided; + if (!residue_size) { + residue_size = Shape::kStrided; + } + + residue_offset_ = make_Coord(0, residue_size); + residue_extent = make_Coord( + extent_.contiguous(), + min(threadblock_offset.strided() + residue_size, extent_.strided()) + ); + } else { + + typename TensorCoord::Index residue_size = (extent_[kAdvanceRank] - threadblock_offset.contiguous()) % Shape::kContiguous; + if (!residue_size) { + residue_size = Shape::kContiguous; + } + + residue_offset_ = make_Coord(residue_size, 0); + + residue_extent = make_Coord( + min(extent_.contiguous(), threadblock_offset.contiguous() + residue_size), + extent_.strided() + ); + } + + // Per-thread offset in logical coordinates of tensor + thread_offset_ = threadblock_offset + ThreadMap::initial_offset(thread_id); + + compute_predicates_(residue_extent, false); + + set_iteration_index(0); + } + + /// Default constructor + PredicatedTileAccessIteratorPredicates() = default; + + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorPredicates( + /// Extent of tensor + TensorCoord extent) + : extent_(extent) { + } + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { + + iteration_vector_ = index % kAccessesPerVector; + int residual_access = index / kAccessesPerVector; + + iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous; + iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous; + + } + + /// Increment and return an instance to self. 
+ CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorPredicates &operator++() { + + return *this; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kPredicateWordCount; ++i) { + predicates_[i] = enable ? 0u : predicates_[i]; + } + + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kPredicateWordCount; ++i) { + predicates_[i] = 0xffffffff; + } + } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const &mask) { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kPredicateWordCount; ++i) { + predicates_[i] = mask[i]; + } + + } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask &mask) { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kPredicateWordCount; ++i) { + mask[i] = predicates_[i]; + } + } + + /// Returns whether access is valid or not + CUTLASS_HOST_DEVICE + bool valid() const { + + + int pred_idx = + iteration_vector_ + kAccessesPerVector * (iteration_contiguous_ + iteration_strided_ * ThreadMap::Iterations::kContiguous); + + int word_idx = pred_idx / kPredicatesPerWord; + int residual = pred_idx % kPredicatesPerWord; + int byte_idx = residual / kPredicatesPerByte; + int bit_idx = residual % kPredicatesPerByte; + + bool pred = (predicates_[word_idx] & (1u << (byte_idx * 8 + bit_idx))) != 0; + return pred; + + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// PredicatedTileAccessIterator +/// +template +class PredicatedTileAccessIterator; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileAccessIterator for pitch-linear data. 
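// [Editorial sketch, not part of the upstream header.] The predicates class
// above packs one guard bit per access into 32-bit words, four predicates per
// byte (kPredicatesPerByte = 4, kPredicatesPerWord = 16). A hypothetical
// standalone helper showing the same index arithmetic that compute_predicates_()
// and valid() use:
namespace predicate_packing_sketch {

constexpr int kPredicatesPerByte = 4;
constexpr int kPredicatesPerWord = 4 * kPredicatesPerByte;  // 16 per uint32_t

// Set the guard bit for a flat predicate index.
inline void set_guard(unsigned words[], int pred_idx, bool guard) {
  int word_idx = pred_idx / kPredicatesPerWord;
  int residual = pred_idx % kPredicatesPerWord;
  int byte_idx = residual / kPredicatesPerByte;   // which byte of the word
  int bit_idx  = residual % kPredicatesPerByte;   // low 4 bits of that byte
  words[word_idx] |= (unsigned(guard) << (byte_idx * 8 + bit_idx));
}

// Test the guard bit for a flat predicate index (mirrors valid()).
inline bool get_guard(unsigned const words[], int pred_idx) {
  int word_idx = pred_idx / kPredicatesPerWord;
  int residual = pred_idx % kPredicatesPerWord;
  int byte_idx = residual / kPredicatesPerByte;
  int bit_idx  = residual % kPredicatesPerByte;
  return (words[word_idx] & (1u << (byte_idx * 8 + bit_idx))) != 0;
}

}  // namespace predicate_packing_sketch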
+/// +template +class PredicatedTileAccessIterator { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::PitchLinear; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + using AccessType = AccessType_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element *; + using NonConstPointer = typename platform::remove_const::type *; + + using UnderlyingPredicates = PredicatedTileAccessIteratorPredicates< + Shape, Element, Layout, AdvanceRank, ThreadMap, AccessType>; + + static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements; + + static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements), + "Vectors implied by the thread map must be divisible by the access type."); + + static bool constexpr Permute = !platform::is_same::value + && !platform::is_same>::value; + + using Mask = typename UnderlyingPredicates::Mask; + + /// Uses a non-template class + struct Params : PredicatedTileAccessIteratorParams { + + using Base = PredicatedTileAccessIteratorParams; + + /// Default constructor + Params() = default; + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const &layout) : + Base(layout.stride(0), + MakePredicatedTileAccessIteratorDesc()() + ) { } + + CUTLASS_HOST_DEVICE + Params(Base const &base) : + Base(base) { } + }; + + private: + /// Internal pointer type permits fast address arithmetic + using BytePointer = char *; + + private: + // + // Data members + // + + UnderlyingPredicates the_predicates; + + /// Parameters object with precomputed internal state + Params params_; + + /// Internal pointer to first access of tile + BytePointer pointer_; + + /// Used for out-of-order visitation + bool is_residue_tile_; + + /// Below is used when Gather is turned on. We need to record strided_offset + /// and contiguous_offset separated to compute the offset by using + /// + /// offset = contiguous_offset + indices[strided_offset] + + /// Gather indices + int const *indices_; + + /// Function to perform layout permutation and offset computation + PermuteLayout permute_layout_; + + /// Tracks thread's coordinate offset in the matrix for current tile. + /// This is only used in the following cases: + /// - when Gather is true, strided coordinate needed to access indices (contiguous offset is tracked via pointer_) + /// - when Permute is true, both coordinates are neeeded as input into permutation function (pointer_ is fixed) + TensorCoord coord_offset_; + + private: + /// Computes predicates based on internally tracked per-thread offset. 
+ CUTLASS_DEVICE + void compute_predicates_( + /// Extent of the matrix window + TensorCoord extent, + /// optionally, simplify predicate calculation during 'steady state' phase + bool is_steady_state = false) { + the_predicates.compute_predicates_(extent, is_steady_state); + } + + public: + + /// Default constructor + PredicatedTileAccessIterator() = default; + + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator( + /// Precomputed parameters object + Params const ¶ms, + /// Pointer to start of tensor + Pointer pointer, + /// Extent of tensor + TensorCoord extent, + /// ID of each participating thread + int thread_id, + /// Initial offset of threadblock + TensorCoord const &threadblock_offset, + /// Gather indices + int const *indices = nullptr) + : params_(params), + pointer_(reinterpret_cast( + const_cast(pointer))), + the_predicates(extent), + is_residue_tile_(true), + indices_(indices), + permute_layout_(TensorCoord(extent.contiguous(), extent.strided()), params.stride_) { + + the_predicates.set_predicates(thread_id, threadblock_offset); + + if (Gather) { + assert(indices_); + } + + // update internal pointers + Layout layout(params_.stride_); + + if (!Gather && !Permute) { + add_pointer_offset(layout(the_predicates.thread_offset_)); + } else { + coord_offset_ = the_predicates.thread_offset_; + if (!Permute) { + add_pointer_offset(layout(make_Coord(coord_offset_.contiguous(), 0))); + } + } + } + + /// Construct a PredicatedTileAccessIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator( + /// Precomputed parameters object + Params const ¶ms, + /// Pointer to start of tensor + Pointer pointer, + /// Extent of tensor + TensorCoord extent, + ///< ID of each participating thread + int thread_id) + : PredicatedTileAccessIterator(params, pointer, extent, thread_id, + make_Coord(0, 0)) {} + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { + the_predicates.set_iteration_index(index); + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + pointer_ += sizeof_bits::value * pointer_offset / 8; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_DEVICE + void add_tile_offset( + TensorCoord const &tile_offset) { + if (is_residue_tile_) { + + the_predicates.thread_offset_ += the_predicates.residue_offset_; + + the_predicates.compute_predicates_(the_predicates.extent_, true); + + Layout layout(params_.stride_); + + if (!Gather && !Permute) { + add_pointer_offset(layout(the_predicates.residue_offset_)); + + if (kAdvanceRank) { + pointer_ += params_.inc_advance_ * LongIndex(tile_offset.strided() - 1); + pointer_ += Shape::kContiguous * tile_offset.contiguous() * sizeof_bits::value / 8; + } else { + pointer_ += params_.inc_advance_ * LongIndex(tile_offset.contiguous() - 1); + pointer_ += Shape::kStrided * tile_offset.strided() * sizeof_bits::value / 8; + } + } else { + coord_offset_.strided() = the_predicates.thread_offset_.strided() + Shape::kStrided * (tile_offset.strided() - kAdvanceRank); + if (!Permute) { + add_pointer_offset(layout(make_Coord(the_predicates.residue_offset_.contiguous(), 0))); + add_pointer_offset(Shape::kContiguous * (tile_offset.contiguous() - (1 - kAdvanceRank))); + } else { + coord_offset_.contiguous() = the_predicates.thread_offset_.contiguous() + 
Shape::kContiguous * (tile_offset.contiguous() - (1 - kAdvanceRank)); + } + } + } else { + if (!Gather && !Permute) { + if (kAdvanceRank) { + pointer_ += params_.inc_advance_ * LongIndex(tile_offset.strided()); + pointer_ += Shape::kContiguous * tile_offset.contiguous(); + } else { + pointer_ += params_.inc_advance_ * LongIndex(tile_offset.contiguous()); + pointer_ += Shape::kStrided * tile_offset.strided(); + } + } else { + coord_offset_.strided() += Shape::kStrided * tile_offset.strided(); + if (!Permute) { + add_pointer_offset(Shape::kContiguous * tile_offset.contiguous()); + } else { + coord_offset_.contiguous() += Shape::kContiguous * tile_offset.contiguous(); + } + } + } + + is_residue_tile_ = false; + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + + if (Gather || Permute) + { + if (!valid()) { + return nullptr; + } + + Index coord_contig = (Permute ? coord_offset_.contiguous() : 0) + the_predicates.iteration_contiguous_ * ThreadMap::Delta::kContiguous + the_predicates.iteration_vector_ * AccessType::kElements; + Index coord_strided = coord_offset_.strided() + the_predicates.iteration_strided_ * ThreadMap::Delta::kStrided; + if (Gather) { + coord_strided = indices_[coord_strided]; + } + + LongIndex offset = Permute ? permute_layout_(TensorCoord(coord_contig, coord_strided)) : (coord_strided * LongIndex(params_.stride_) + coord_contig); + return reinterpret_cast(pointer_ + OffsetBytes(offset)); + } + + return reinterpret_cast( + pointer_ + + the_predicates.iteration_contiguous_ * (ThreadMap::Delta::kContiguous * sizeof_bits::value) / 8) + the_predicates.iteration_vector_; + } + + /// Increment and return an instance to self. + CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator &operator++() { + + the_predicates.operator++(); + + ++the_predicates.iteration_vector_; + if (the_predicates.iteration_vector_ < kAccessesPerVector) { + return *this; + } + + the_predicates.iteration_vector_ = 0; + ++the_predicates.iteration_contiguous_; + + if (the_predicates.iteration_contiguous_ < ThreadMap::Iterations::kContiguous) { + return *this; + } + + // Enter here only if (iteration_contiguous_ == ThreadMap::Iteration::kContiguous) + the_predicates.iteration_contiguous_ = 0; + ++the_predicates.iteration_strided_; + + if (the_predicates.iteration_strided_ < ThreadMap::Iterations::kStrided) { + if (!Gather && !Permute) { + pointer_ += params_.inc_strided_; + } + + return *this; + } + + // Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided) + // which means we enter the next tile. + the_predicates.iteration_strided_ = 0; + + if (!Gather && !Permute) { + // advance to next tile + pointer_ += params_.inc_next_; + + // now return to start tile - if the iterator is subsequently advanced, this + // subtraction as well as the subsequent integer addition are both elided by + // the compiler. + pointer_ -= params_.inc_advance_; + } + + return *this; + } + + /// Increment and return an instance to self. 
+ CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator operator++(int) { + PredicatedTileAccessIterator self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { + the_predicates.clear_mask(enable); + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { + the_predicates.enable_mask(); + } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const &mask) { + the_predicates.set_mask(mask); + } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask &mask) { + the_predicates.get_mask(mask); + } + + /// Returns whether access is valid or not + CUTLASS_HOST_DEVICE + bool valid() const { + return the_predicates.valid(); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileAccessIterator for column-major data. +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template +class PredicatedTileAccessIterator { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::ColumnMajor; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + using AccessType = AccessType_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element *; + using NonConstPointer = typename platform::remove_const::type *; + + using UnderlyingIterator = PredicatedTileAccessIterator< + layout::PitchLinearShape, Element, + layout::PitchLinear, (kAdvanceRank == 0 ? 
0 : 1), ThreadMap, AccessType, + Gather, PermuteLayout>; + + /// Predicate vector stores mask to guard accesses + using Mask = typename UnderlyingIterator::Mask; + + static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector; + + /// Parameters object is precomputed state and is host-constructible + class Params { + private: + friend PredicatedTileAccessIterator; + + /// Parameters object + typename UnderlyingIterator::Params params_; + + public: + + /// Default constructor + Params() = default; + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const &layout) + : params_(layout::PitchLinear(layout.stride(0))){}; + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(typename UnderlyingIterator::Params::Base const &base) + : params_(base) {} + }; + + private: + // + // Data members + // + + /// Underlying pitch-linear tile iterator + UnderlyingIterator iterator_; + + public: + + /// Default constructor + PredicatedTileAccessIterator() = default; + + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator( + ///< Precomputed parameters object + Params const ¶ms, + ///< Pointer to start of tensor + Pointer pointer, + ///< Extent of tensor + TensorCoord extent, + ///< ID of each participating thread + int thread_id, + ///< Initial offset of threadblock + TensorCoord const &threadblock_offset, + int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization + ) + : iterator_(params.params_, pointer, + layout::PitchLinearCoord(extent.row(), extent.column()), + thread_id, + layout::PitchLinearCoord(threadblock_offset.row(), + threadblock_offset.column()), + indices) {} + + /// Construct a PredicatedTileAccessIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ) + : PredicatedTileAccessIterator(params, pointer, extent, thread_id, + make_Coord(0, 0)) {} + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { iterator_.set_iteration_index(index); } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances an iterator along logical dimensions of matrix in units of whole + /// tiles + CUTLASS_HOST_DEVICE + void add_tile_offset(TensorCoord const &tile_offset) { + iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()}); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + return reinterpret_cast(iterator_.get()); + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. 
+ /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator operator++(int) { + PredicatedTileAccessIterator self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { iterator_.enable_mask(); } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const &mask) { iterator_.set_mask(mask); } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask &mask) { iterator_.get_mask(mask); } + + /// Returns whether access is valid or not + CUTLASS_HOST_DEVICE + bool valid() { + return iterator_.valid(); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileAccessIterator for row-major data. +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template +class PredicatedTileAccessIterator { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::RowMajor; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + using AccessType = AccessType_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element *; + using NonConstPointer = typename platform::remove_const::type *; + + using UnderlyingIterator = PredicatedTileAccessIterator< + layout::PitchLinearShape, Element, + layout::PitchLinear, (kAdvanceRank == 0 ? 
1 : 0), ThreadMap, AccessType, + Gather, PermuteLayout>; + + static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector; + + /// Predicate vector stores mask to guard accesses + using Mask = typename UnderlyingIterator::Mask; + + /// Parameters object is precomputed state and is host-constructible + class Params { + private: + friend PredicatedTileAccessIterator; + + /// Parameters object + typename UnderlyingIterator::Params params_; + + public: + + /// Default constructor + Params() = default; + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const &layout) + : params_(layout::PitchLinear(layout.stride(0))){}; + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(typename UnderlyingIterator::Params::Base const &base) + : params_(base) {} + }; + + private: + // + // Data members + // + + /// Underlying pitch-linear tile iterator + UnderlyingIterator iterator_; + + public: + + /// Default constructor + PredicatedTileAccessIterator() = default; + + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator( + ///< Precomputed parameters object + Params const ¶ms, + ///< Pointer to start of tensor + Pointer pointer, + ///< Extent of tensor + TensorCoord extent, + ///< ID of each participating thread + int thread_id, + ///< Initial offset of threadblock + TensorCoord const &threadblock_offset, + /// Gather indices + int const *indices = nullptr) + : iterator_(params.params_, pointer, + layout::PitchLinearCoord(extent.column(), extent.row()), + thread_id, + layout::PitchLinearCoord(threadblock_offset.column(), + threadblock_offset.row()), + indices) {} + + /// Construct a PredicatedTileAccessIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ) + : PredicatedTileAccessIterator(params, pointer, extent, thread_id, + make_Coord(0, 0)) {} + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { iterator_.set_iteration_index(index); } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances an iterator along logical dimensions of matrix in units of whole + /// tiles + CUTLASS_HOST_DEVICE + void add_tile_offset(TensorCoord const &tile_offset) { + iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()}); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + return reinterpret_cast(iterator_.get()); + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. 
+ /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator operator++(int) { + PredicatedTileAccessIterator self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { iterator_.enable_mask(); } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const &mask) { iterator_.set_mask(mask); } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask &mask) { iterator_.get_mask(mask); } + + /// Returns whether access is valid or not + CUTLASS_HOST_DEVICE + bool valid() { + return iterator_.valid(); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileAccessIterator for affine rank 2 data. +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template +class PredicatedTileAccessIterator, + AdvanceRank, ThreadMap_, AccessType_, false, + layout::NoPermute> { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::AffineRankN<2>; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + using AccessType = AccessType_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element *; + using NonConstPointer = typename platform::remove_const::type *; + + using UnderlyingPredicates = PredicatedTileAccessIteratorPredicates< + Shape, Element, layout::PitchLinear, AdvanceRank, ThreadMap, AccessType>; + + static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements; + + static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements), + "Vectors implied by the thread map must be divisible by the access type."); + + /// Predicate vector stores mask to guard accesses + using Mask = typename UnderlyingPredicates::Mask; + + /// Parameters object is precomputed state and is host-constructible + class Params { + public: + friend PredicatedTileAccessIterator; + + private: + /// stride of pitch-linear layout (units of Element) + Coord stride_; + /// amount (in byte) to increment pointer to move to next access along + /// contiguous dimension + LongIndex inc_contiguous_; + /// amount (in byte) to increment pointer from first access of current + /// contiguous dimension to first access of next one. + LongIndex inc_strided_; + /// amount (in byte) to increment pointer from last access of current + /// contiguous dimension to first access of next one. 
+ LongIndex inc_next_strided_; + /// amount (in byte) to increment pointer from last access to first access + /// of next tile + LongIndex inc_next_; + /// amount (in byte) to increment pointer from first access of current tile + /// to first access of next tile + LongIndex inc_advance_; + + public: + + // Default ctor + CUTLASS_HOST_DEVICE + Params(): stride_(0), inc_contiguous_(0), inc_strided_(0), inc_next_(0), inc_advance_(0) { } + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const &layout) : stride_({layout.stride(0), layout.stride(1)}) { + inc_contiguous_ = (LongIndex(stride_[0]) * ThreadMap::Delta::kContiguous) * + sizeof_bits::value / 8; + + inc_strided_ = (LongIndex(stride_[1]) * ThreadMap::Delta::kStrided) * + sizeof_bits::value / 8; + + inc_next_strided_ = inc_strided_ - LongIndex(ThreadMap::Iterations::kContiguous - 1) * inc_contiguous_; + + if (kAdvanceRank) { + // advance along strided dimension + inc_advance_ = + Shape::kStrided * LongIndex(stride_[1]) * sizeof_bits::value / 8; + } else { + // advance along contiguous dimension + inc_advance_ = Shape::kContiguous * stride_[0] * sizeof_bits::value / 8; + } + + inc_next_ = inc_advance_ - LongIndex(ThreadMap::Iterations::kContiguous - 1) * inc_contiguous_ - LongIndex(ThreadMap::Iterations::kStrided - 1) * inc_strided_; + }; + }; + + private: + /// Internal pointer type permits fast address arithmetic + using BytePointer = char *; + + // + // Data members + // + + /// Parameters object with precomputed internal state + Params params_; + + /// Internal pointer to first access of tile + BytePointer pointer_; + + UnderlyingPredicates the_predicates; + + /// Used for out-of-order visitation + bool is_residue_tile_; + + private: + /// Computes predicates based on internally tracked per-thread offset. 
+ CUTLASS_DEVICE + void compute_predicates_( + /// Extent of the matrix window + TensorCoord extent, + /// optionally, simplify predicate calculation during 'steady state' phase + bool is_steady_state = false) { + the_predicates.compute_predicates_(extent, is_steady_state); + } + + public: + + /// Default constructor + PredicatedTileAccessIterator() = default; + + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator( + ///< Precomputed parameters object + Params const ¶ms, + ///< Pointer to start of tensor + Pointer pointer, + ///< Extent of tensor + TensorCoord extent, + ///< ID of each participating thread + int thread_id, + ///< Initial offset of threadblock + TensorCoord const &threadblock_offset, + int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization + ) + : params_(params), + pointer_(reinterpret_cast( + const_cast(pointer))), + the_predicates(extent), + is_residue_tile_(true) { + + the_predicates.set_predicates(thread_id, threadblock_offset); + + // update internal pointers + Layout layout(params_.stride_); + add_pointer_offset(layout(the_predicates.thread_offset_)); + } + + /// Construct a PredicatedTileAccessIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ) + : PredicatedTileAccessIterator(params, pointer, extent, thread_id, + make_Coord(0, 0)) {} + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { the_predicates.set_iteration_index(index); } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + pointer_ += sizeof_bits::value * pointer_offset / 8; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole + /// tiles + CUTLASS_HOST_DEVICE + void add_tile_offset(TensorCoord const &tile_offset) { + if (is_residue_tile_) { + + the_predicates.thread_offset_ += the_predicates.residue_offset_; + + Layout layout(params_.stride_); + add_pointer_offset(layout(the_predicates.residue_offset_)); + + the_predicates.compute_predicates_(the_predicates.extent_, true); + + if (kAdvanceRank) { + pointer_ += params_.inc_advance_ * LongIndex(tile_offset[1] - 1); + pointer_ += Shape::kContiguous * tile_offset[0]; + } else { + pointer_ += params_.inc_advance_ * LongIndex(tile_offset[0] - 1); + pointer_ += Shape::kStrided * tile_offset[1]; + } + } else { + if (kAdvanceRank) { + pointer_ += params_.inc_advance_ * LongIndex(tile_offset[1]); + pointer_ += Shape::kContiguous * tile_offset[0]; + } else { + pointer_ += params_.inc_advance_ * LongIndex(tile_offset[0]); + pointer_ += Shape::kStrided * tile_offset[1]; + } + } + is_residue_tile_ = false; + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + return reinterpret_cast(pointer_) + the_predicates.iteration_vector_; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. 
+ CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator &operator++() { + the_predicates.operator++(); + ++the_predicates.iteration_vector_; + if (the_predicates.iteration_vector_ < kAccessesPerVector) { + return *this; + } + + the_predicates.iteration_vector_ = 0; + ++the_predicates.iteration_contiguous_; + + if (the_predicates.iteration_contiguous_ < ThreadMap::Iterations::kContiguous) { + pointer_ += params_.inc_contiguous_; + return *this; + } + + // Enter here only if (iteration_contiguous_ == + // ThreadMap::Iteration::kContiguous) + the_predicates.iteration_contiguous_ = 0; + ++the_predicates.iteration_strided_; + + if (the_predicates.iteration_strided_ < ThreadMap::Iterations::kStrided) { + pointer_ += params_.inc_next_strided_; + return *this; + } + + // Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided) + // which means we enter the next tile. + the_predicates.iteration_strided_ = 0; + + // advance to next tile + pointer_ += params_.inc_next_; + + // now return to start tile - if the iterator is subsequently advanced, this + // subtraction as well as the subsequent integer addition are both elided by + // the compiler. + pointer_ -= params_.inc_advance_; + + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator operator++(int) { + PredicatedTileAccessIterator self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { the_predicates.clear_mask(enable); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { the_predicates.enable_mask(); } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const &mask) { the_predicates.set_mask(mask); } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask &mask) { the_predicates.get_mask(mask); } + + /// Returns whether access is valid or not + CUTLASS_HOST_DEVICE + bool valid() { + return the_predicates.valid(); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileAccessIterator for affine rank 2 column-major data. 
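// [Editorial sketch, not part of the upstream header.] In the affine rank-2
// specializations both coordinates carry a stride, so the Params object above
// converts the thread-map deltas into byte increments once, up front. A
// hypothetical standalone version of that arithmetic (the strides, deltas, and
// element size are example inputs, not fixed by the library):
#include <cstdint>

struct AffineIncrementsSketch {
  int64_t inc_contiguous;    // bytes between accesses along the contiguous rank
  int64_t inc_strided;       // bytes between first accesses of two strided steps
  int64_t inc_next_strided;  // from the last contiguous access to the next strided step
};

inline AffineIncrementsSketch make_affine_increments(
    int64_t stride_contiguous, int64_t stride_strided,
    int delta_contiguous, int delta_strided,
    int iterations_contiguous, int element_bytes) {
  AffineIncrementsSketch inc;
  inc.inc_contiguous = stride_contiguous * delta_contiguous * element_bytes;
  inc.inc_strided    = stride_strided * delta_strided * element_bytes;
  // Stepping to the next strided iteration first "rewinds" the contiguous walk,
  // matching how inc_next_strided_ is derived in the Params constructor above.
  inc.inc_next_strided =
      inc.inc_strided - int64_t(iterations_contiguous - 1) * inc.inc_contiguous;
  return inc;
}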
+/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template +class PredicatedTileAccessIterator { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::AffineRank2ColumnMajor; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + using AccessType = AccessType_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element *; + using NonConstPointer = typename platform::remove_const::type *; + + // Map to the underlying AffineRankN<2> layout + using UnderlyingIterator = PredicatedTileAccessIterator< + layout::PitchLinearShape, Element, + layout::AffineRankN<2>, (kAdvanceRank == 0 ? 0 : 1), ThreadMap, AccessType>; + + static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector; + + /// Predicate vector stores mask to guard accesses + using Mask = typename UnderlyingIterator::Mask; + + /// Parameters object is precomputed state and is host-constructible + class Params { + private: + friend PredicatedTileAccessIterator; + + /// Parameters object + typename UnderlyingIterator::Params params_; + + public: + + /// Default constructor + Params() = default; + + /// Construct the Params object given an AffineRankN<2> tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const &layout) + : params_(layout::AffineRankN<2>(layout.stride(0), layout.stride(1))){}; + }; + + private: + // + // Data members + // + + /// Underlying AffineRankN<2> tile iterator + UnderlyingIterator iterator_; + + public: + + /// Default constructor + PredicatedTileAccessIterator() = default; + + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator( + ///< Precomputed parameters object + Params const ¶ms, + ///< Pointer to start of tensor + Pointer pointer, + ///< Extent of tensor + TensorCoord extent, + ///< ID of each participating thread + int thread_id, + ///< Initial offset of threadblock + TensorCoord const &threadblock_offset, + int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization + ) + : iterator_(params.params_, pointer, + layout::PitchLinearCoord(extent.row(), extent.column()), + thread_id, + layout::PitchLinearCoord(threadblock_offset.row(), + threadblock_offset.column())) {} + + /// Construct a PredicatedTileAccessIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ) + : PredicatedTileAccessIterator(params, pointer, extent, thread_id, + make_Coord(0, 0)) {} + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { iterator_.set_iteration_index(index); } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + 
iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances an iterator along logical dimensions of matrix in units of whole + /// tiles + CUTLASS_HOST_DEVICE + void add_tile_offset(TensorCoord const &tile_offset) { + iterator_.add_tile_offset(make_Coord(tile_offset.row(), tile_offset.column())); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + return reinterpret_cast(iterator_.get()); + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator operator++(int) { + PredicatedTileAccessIterator self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { iterator_.enable_mask(); } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const &mask) { iterator_.set_mask(mask); } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask &mask) { iterator_.get_mask(mask); } + + /// Returns whether access is valid or not + CUTLASS_HOST_DEVICE + bool valid() { + return iterator_.valid(); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileAccessIterator for affine rank-2 row-major data. +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template +class PredicatedTileAccessIterator { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::AffineRank2RowMajor; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + using AccessType = AccessType_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element *; + using NonConstPointer = typename platform::remove_const::type *; + + // Map to the underlying AffineRankN<2> layout + using UnderlyingIterator = PredicatedTileAccessIterator< + layout::PitchLinearShape, Element, + layout::AffineRankN<2>, (kAdvanceRank == 0 ? 
1 : 0), ThreadMap, AccessType>; + + static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector; + + /// Predicate vector stores mask to guard accesses + using Mask = typename UnderlyingIterator::Mask; + + /// Parameters object is precomputed state and is host-constructible + class Params { + private: + friend PredicatedTileAccessIterator; + + /// Parameters object + typename UnderlyingIterator::Params params_; + + public: + + /// Default constructor + Params() = default; + + /// Construct the Params object given an AffineRankN<2> tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const &layout) + : params_(layout::AffineRankN<2>(layout.stride(1), layout.stride(0))){}; + }; + + private: + // + // Data members + // + + /// Underlying AffineRankN<2> tile iterator + UnderlyingIterator iterator_; + + public: + + /// Default constructor + PredicatedTileAccessIterator() = default; + + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator( + ///< Precomputed parameters object + Params const ¶ms, + ///< Pointer to start of tensor + Pointer pointer, + ///< Extent of tensor + TensorCoord extent, + ///< ID of each participating thread + int thread_id, + ///< Initial offset of threadblock + TensorCoord const &threadblock_offset, + int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization + ) + : iterator_(params.params_, pointer, + layout::PitchLinearCoord(extent.column(), extent.row()), + thread_id, + layout::PitchLinearCoord(threadblock_offset.column(), + threadblock_offset.row())) {} + + /// Construct a PredicatedTileAccessIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ) + : PredicatedTileAccessIterator(params, pointer, extent, thread_id, + make_Coord(0, 0)) {} + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { iterator_.set_iteration_index(index); } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances an iterator along logical dimensions of matrix in units of whole + /// tiles + CUTLASS_HOST_DEVICE + void add_tile_offset(TensorCoord const &tile_offset) { + iterator_.add_tile_offset(make_Coord(tile_offset.column(), tile_offset.row())); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + return reinterpret_cast(iterator_.get()); + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. 
+ CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator operator++(int) { + PredicatedTileAccessIterator self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { iterator_.enable_mask(); } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const &mask) { iterator_.set_mask(mask); } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask &mask) { iterator_.get_mask(mask); } + + /// Returns whether access is valid or not + CUTLASS_HOST_DEVICE + bool valid() { + return iterator_.valid(); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileAccessIterator for column-major interleaved data. +/// It is mapped to the congruous layout. +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// + +template +class PredicatedTileAccessIterator, + AdvanceRank, ThreadMap_, AccessType_, false, + layout::NoPermute> { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + static int const kInterleavedK = InterleavedK; + using Layout = layout::ColumnMajorInterleaved; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + using AccessType = AccessType_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element *; + using NonConstPointer = typename platform::remove_const::type *; + + using UnderlyingIterator = PredicatedTileAccessIterator< + layout::PitchLinearShape, + Element, layout::PitchLinear, (kAdvanceRank == 0 ? 
0 : 1), ThreadMap, + AccessType>; + + static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector; + + /// Predicate vector stores mask to guard accesses + using Mask = typename UnderlyingIterator::Mask; + + /// Parameters object is precomputed state and is host-constructible + class Params { + private: + friend PredicatedTileAccessIterator; + + /// Parameters object + typename UnderlyingIterator::Params params_; + + public: + + /// Default constructor + Params() = default; + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const &layout) + : params_(layout::PitchLinear(layout.stride(0))) {} + + CUTLASS_HOST_DEVICE + Params(typename UnderlyingIterator::Params::Base const &base) + : params_(base) {} + }; + + private: + // + // Data members + // + + /// Underlying pitch-linear tile iterator + UnderlyingIterator iterator_; + + public: + + /// Default constructor + PredicatedTileAccessIterator() = default; + + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator( + /// Precomputed parameters object + Params const ¶ms, + /// Pointer to start of tensor + Pointer pointer, + /// Extent of tensor + TensorCoord extent, + /// ID of each participating thread + int thread_id, + /// Initial offset of threadblock + TensorCoord const &threadblock_offset, + int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization + ) + : iterator_(params.params_, pointer, + layout::PitchLinearCoord(extent.row() * kInterleavedK, + extent.column() / kInterleavedK), + thread_id, + layout::PitchLinearCoord( + threadblock_offset.row() * kInterleavedK, + threadblock_offset.column() / kInterleavedK)) {} + + /// Construct a PredicatedTileAccessIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ) + : PredicatedTileAccessIterator(params, pointer, extent, thread_id, + make_Coord(0, 0)) {} + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { iterator_.set_iteration_index(index); } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances an iterator along logical dimensions of matrix in units of whole + /// tiles + CUTLASS_HOST_DEVICE + void add_tile_offset(TensorCoord const &tile_offset) { + iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()}); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + return reinterpret_cast(iterator_.get()); + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. 
+ /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator operator++(int) { + PredicatedTileAccessIterator self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { iterator_.enable_mask(); } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const &mask) { iterator_.set_mask(mask); } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask &mask) { iterator_.get_mask(mask); } + + /// Returns whether access is valid or not + CUTLASS_HOST_DEVICE + bool valid() { return iterator_.valid(); } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileAccessIterator for row-major interleaved data. +// It is mapped to the congruous layout. +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template +class PredicatedTileAccessIterator, + AdvanceRank, ThreadMap_, AccessType_, false, + layout::NoPermute> { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + static int const kInterleavedK = InterleavedK; + using Layout = layout::RowMajorInterleaved; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + using AccessType = AccessType_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element *; + using NonConstPointer = typename platform::remove_const::type *; + + using UnderlyingIterator = PredicatedTileAccessIterator< + layout::PitchLinearShape, + Element, layout::PitchLinear, (kAdvanceRank == 0 ? 
1 : 0), ThreadMap, + AccessType>; + + + static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector; + + /// Predicate vector stores mask to guard accesses + using Mask = typename UnderlyingIterator::Mask; + + /// Parameters object is precomputed state and is host-constructible + class Params { + private: + friend PredicatedTileAccessIterator; + + /// Parameters object + typename UnderlyingIterator::Params params_; + + public: + + /// Default constructor + Params() = default; + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const &layout) + : params_(layout::PitchLinear(layout.stride(0))) {} + + CUTLASS_HOST_DEVICE + Params(typename UnderlyingIterator::Params::Base const &base) + : params_(base) {} + }; + + private: + // + // Data members + // + + /// Underlying pitch-linear tile iterator + UnderlyingIterator iterator_; + + public: + + /// Default constructor + PredicatedTileAccessIterator() = default; + + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator( + /// Precomputed parameters object + Params const ¶ms, + /// Pointer to start of tensor + Pointer pointer, + /// Extent of tensor + TensorCoord extent, + /// ID of each participating thread + int thread_id, + /// Initial offset of threadblock + TensorCoord const &threadblock_offset, + int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization + ) + : iterator_(params.params_, pointer, + layout::PitchLinearCoord(extent.column() * kInterleavedK, + extent.row() / kInterleavedK), + thread_id, + layout::PitchLinearCoord( + threadblock_offset.column() * kInterleavedK, + threadblock_offset.row() / kInterleavedK)) {} + + /// Construct a PredicatedTileAccessIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ) + : PredicatedTileAccessIterator(params, pointer, extent, thread_id, + make_Coord(0, 0)) {} + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { iterator_.set_iteration_index(index); } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances an iterator along logical dimensions of matrix in units of whole + /// tiles + CUTLASS_HOST_DEVICE + void add_tile_offset(TensorCoord const &tile_offset) { + iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()}); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + return reinterpret_cast(iterator_.get()); + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. 
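+  //
+  // Illustrative note (added commentary, not upstream): in the constructor above, the
+  // extent handed to the pitch-linear iterator is rescaled -- contiguous extent =
+  // extent.column() * kInterleavedK, strided extent = extent.row() / kInterleavedK --
+  // reflecting that each group of kInterleavedK rows is interleaved along the
+  // contiguous dimension.
+  //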
+ /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator operator++(int) { + PredicatedTileAccessIterator self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { iterator_.enable_mask(); } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const &mask) { iterator_.set_mask(mask); } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask &mask) { iterator_.get_mask(mask); } + + /// Returns whether access is valid or not + CUTLASS_HOST_DEVICE + bool valid() { return iterator_.valid(); } +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace transform +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/predicated_tile_access_iterator_2dthreadtile.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/predicated_tile_access_iterator_2dthreadtile.h new file mode 100644 index 0000000000000000000000000000000000000000..1ce5e39dcef5ddcbb5b3b0e1560654a257c23fcb --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/predicated_tile_access_iterator_2dthreadtile.h @@ -0,0 +1,834 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Templates calculating the address and predicates to the load of tiles + from pitch-linear rank=2 tensors. + + This iterator uses masks to guard out-of-bounds accesses and visits the last + "residue" tile first, with the objective of minimizing predicate mask updates + during steady-state operation. + + A precomputed "Params" object minimizes the amount of state that must be + stored in registers, and integer addition is used to advance the pointer + through memory. +*/ + +#pragma once + +#include "cutlass/array.h" +#include "cutlass/coord.h" +#include "cutlass/cutlass.h" +#include "cutlass/layout/matrix.h" +#include "cutlass/layout/pitch_linear.h" +#include "cutlass/matrix_shape.h" +#include "cutlass/predicate_vector.h" +#include "cutlass/tensor_ref.h" +#include "cutlass/tensor_view.h" +#include "cutlass/transform/threadblock/predicated_tile_access_iterator_params.h" + +//////////////////////////////////////////////////////////////////////////////// + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace transform { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +/// PredicatedTileAccessIterator2dThreadTile +/// +template +class PredicatedTileAccessIterator2dThreadTile; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileAccessIterator2dThreadTile for pitch-linear data. 
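+///
+/// Illustrative usage sketch (an assumption about typical call sites, not code taken
+/// from this header): a Params object is built once from the tensor layout, then each
+/// thread constructs the iterator and walks its guarded accesses, e.g.
+///
+///   typename Iterator::Params params(layout);                 // "Iterator" is a hypothetical
+///   Iterator iter(params, ptr, extent, thread_id, tb_offset); // instantiation name
+///   for (int i = 0; i < kAccessCount; ++i) {                  // kAccessCount: assumed per-thread count
+///     if (iter.valid()) { frag[i] = *iter.get(); }
+///     ++iter;
+///   }
+///   iter.add_tile_offset({0, 1});  // move the whole tile window between mainloop steps
+///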
+/// +template +class PredicatedTileAccessIterator2dThreadTile { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::PitchLinear; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + using AccessType = AccessType_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + using StrideIndex = typename Layout::Stride::Index; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element *; + using NonConstPointer = typename platform::remove_const::type *; + + static int const kPredicatesPerByte = 4; + static int const kPredicatesPerWord = 4 * kPredicatesPerByte; + + /// Number of 32b words containing predicates + static int const kPredicateByteCount = (ThreadMap::Iterations::kCount * ThreadMap::ThreadAccessShape::kStrided + kPredicatesPerByte - 1) / kPredicatesPerByte; + static int const kPredicateWordCount = (kPredicateByteCount + 3) / 4; + + static unsigned const kPredicateMask = (1u << kPredicatesPerByte) - 1u; + + static_assert(kPredicateWordCount <= 4, "Too many predicates."); + + /// Predicate vector stores mask to guard accesses + using Mask = Array; + + /// Uses a non-template class + struct Params : PredicatedTileAccessIteratorParams { + + public: + friend PredicatedTileAccessIterator2dThreadTile; + + using Base = PredicatedTileAccessIteratorParams; + + // Default ctor + CUTLASS_HOST_DEVICE + Params() { } + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const &layout) : + Base(layout.stride(0), + MakePredicatedTileAccessIteratorDesc()() + ) { } + + CUTLASS_HOST_DEVICE + Params(Base const &base) : + Base(base) { } + }; + + + private: + /// Internal pointer type permits fast address arithmetic + using BytePointer = char *; + + private: + // + // Data members + // + + /// Parameters object with precomputed internal state + Params const ¶ms_; + + /// Internal pointer to first access of tile + BytePointer pointer_; + + /// Guard predicates + uint32_t predicates_[kPredicateWordCount]; + + /// Size of tensor + TensorCoord extent_; + + /// Initial offset for each thread + TensorCoord thread_offset_; + + /// Index of residue tile + int residue_tile_idx_; + + /// Used for out-of-order visitation + bool is_residue_tile_; + + /// Iteration in the contiguous dimension + int iteration_contiguous_; + + /// Iteration in the strided dimension + int iteration_strided_; + + /// Tracks iterations within the thread loop + int iteration_thread_; + + private: + /// Computes predicates based on internally tracked per-thread offset. 
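+  ///
+  /// Illustrative note (added commentary, not upstream): predicates are packed four per
+  /// byte within each 32-bit word (kPredicatesPerByte == 4, kPredicatesPerWord == 16),
+  /// so a flat predicate index decomposes exactly as in compute_predicates_() and
+  /// valid():
+  ///
+  ///   word_idx = pred_idx / 16;   residual = pred_idx % 16;
+  ///   byte_idx = residual / 4;    bit_idx  = residual % 4;
+  ///   bit position within the word = byte_idx * 8 + bit_idx
+  ///
+  /// e.g. pred_idx == 13 maps to word 0, byte 3, bit 1, i.e. bit 25 of predicates_[0].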
+ CUTLASS_HOST_DEVICE + void compute_predicates_( + /// optionally, simplify predicate calculation during 'steady state' phase + bool is_steady_state = false) { + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kPredicateWordCount; ++i) { + predicates_[i] = 0u; + } + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { + CUTLASS_PRAGMA_UNROLL + for (int ts = 0; ts < ThreadMap::ThreadAccessShape::kStrided; ts++) { + + TensorCoord iteration_coord(c * ThreadMap::Delta::kContiguous, + ts + s * ThreadMap::Delta::kStrided); + + TensorCoord coord = thread_offset_ + iteration_coord; + + bool guard; + + if (is_steady_state) { + if (kAdvanceRank == 0) { + guard = (coord.strided() < extent_.strided()); + } else { + guard = (coord.contiguous() < extent_.contiguous()); + } + } else { + guard = (coord.strided() < extent_.strided() && + coord.contiguous() < extent_.contiguous()); + } + + int pred_idx = ts + c * ThreadMap::ThreadAccessShape::kStrided + s * ThreadMap::Iterations::kContiguous * ThreadMap::ThreadAccessShape::kStrided; + int word_idx = pred_idx / kPredicatesPerWord; + int residual = pred_idx % kPredicatesPerWord; + int byte_idx = residual / kPredicatesPerByte; + int bit_idx = residual % kPredicatesPerByte; + + predicates_[word_idx] |= (unsigned(guard) << (byte_idx * 8 + bit_idx)); + + } + } + } + + } + + public: + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator2dThreadTile( + /// Precomputed parameters object + Params const ¶ms, + /// Pointer to start of tensor + Pointer pointer, + /// Extent of tensor + TensorCoord extent, + /// ID of each participating thread + int thread_id, + /// Initial offset of threadblock + TensorCoord const &threadblock_offset) + : params_(params), + pointer_(reinterpret_cast( + const_cast(pointer))), + extent_(extent), + is_residue_tile_(true) { + + + TensorCoord residue_offset; + if (kAdvanceRank) { + residue_tile_idx_ = + (extent_[kAdvanceRank] - threadblock_offset[kAdvanceRank] - 1) / + Shape::kStrided; + residue_offset = make_Coord(0, residue_tile_idx_ * Shape::kStrided); + } else { + residue_tile_idx_ = + (extent_[kAdvanceRank] - threadblock_offset[kAdvanceRank] - 1) / + Shape::kContiguous; + residue_offset = make_Coord(residue_tile_idx_ * Shape::kContiguous, 0); + } + + // Per-thread offset in logical coordinates of tensor + thread_offset_ = threadblock_offset + residue_offset + + ThreadMap::initial_offset(thread_id); + + // update internal pointers + Layout layout(params_.stride_); + add_pointer_offset(layout(thread_offset_)); + + compute_predicates_(false); + + set_iteration_index(0); + } + + /// Construct a PredicatedTileAccessIterator2dThreadTile with zero threadblock offset + CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator2dThreadTile( + /// Precomputed parameters object + Params const ¶ms, + /// Pointer to start of tensor + Pointer pointer, + /// Extent of tensor + TensorCoord extent, + ///< ID of each participating thread + int thread_id) + : PredicatedTileAccessIterator2dThreadTile(params, pointer, extent, thread_id, + make_Coord(0, 0)) {} + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { + + int residual = index % (ThreadMap::Iterations::kContiguous * ThreadMap::ThreadAccessShape::kStrided); + iteration_strided_ = index / (ThreadMap::Iterations::kContiguous * 
ThreadMap::ThreadAccessShape::kStrided); + + iteration_contiguous_ = residual / ThreadMap::ThreadAccessShape::kStrided; + iteration_thread_ = residual % ThreadMap::ThreadAccessShape::kStrided; + + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + pointer_ += int(sizeof(Element)) * pointer_offset; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_DEVICE + void add_tile_offset( + TensorCoord const &tile_offset) { + if (is_residue_tile_) { + TensorCoord residue_offset; + if (kAdvanceRank) { + residue_offset = TensorCoord(0, residue_tile_idx_ * Shape::kStrided); + } else { + residue_offset = TensorCoord(residue_tile_idx_ * Shape::kContiguous, 0); + } + + thread_offset_ -= residue_offset; + + Layout layout(params_.stride_); + add_pointer_offset(-layout(residue_offset)); + + compute_predicates_(true); + + if (kAdvanceRank) { + pointer_ += params_.inc_advance_ * (tile_offset.strided() - 1); + pointer_ += Shape::kContiguous * tile_offset.contiguous(); + } else { + pointer_ += params_.inc_advance_ * (tile_offset.contiguous() - 1); + pointer_ += Shape::kStrided * tile_offset.strided(); + } + } else { + if (kAdvanceRank) { + pointer_ += params_.inc_advance_ * tile_offset.strided(); + pointer_ += Shape::kContiguous * tile_offset.contiguous(); + } else { + pointer_ += params_.inc_advance_ * tile_offset.contiguous(); + pointer_ += Shape::kStrided * tile_offset.strided(); + } + } + is_residue_tile_ = false; + } + + CUTLASS_HOST_DEVICE + AccessType *get() const { + + AccessType *ret_val = reinterpret_cast( + pointer_ + (iteration_thread_ * params_.stride_ + iteration_contiguous_ * ThreadMap::Delta::kContiguous) * int(sizeof(Element))); + + return ret_val; + } + + /// Increment and return an instance to self. + CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator2dThreadTile &operator++() { + + iteration_thread_++; + + if (iteration_thread_ < ThreadMap::ThreadAccessShape::kStrided) + return *this; + + iteration_thread_ = 0; + + ++iteration_contiguous_; + + if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) + return *this; + + // Enter here only if (iteration_contiguous_ == + // ThreadMap::Iteration::kContiguous) + iteration_contiguous_ = 0; + ++iteration_strided_; + + if (iteration_strided_ < ThreadMap::Iterations::kStrided) { + pointer_ += params_.inc_strided_; + return *this; + } + + // Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided) + // which means we enter the next tile. + iteration_strided_ = 0; + + // advance to next tile + pointer_ += params_.inc_next_; + + // now return to start tile - if the iterator is subsequently advanced, this + // subtraction as well as the subsequent integer addition are both elided by + // the compiler. + pointer_ -= params_.inc_advance_; + + return *this; + } + + /// Increment and return an instance to self. + CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator2dThreadTile operator++(int) { + PredicatedTileAccessIterator2dThreadTile self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kPredicateWordCount; ++i) { + predicates_[i] = enable ? 
0u : predicates_[i]; + } + + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kPredicateWordCount; ++i) { + predicates_[i] = 0xffffffff; + } + } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const &mask) { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kPredicateWordCount; ++i) { + predicates_[i] = mask[i]; + } + + } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask &mask) { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kPredicateWordCount; ++i) { + mask[i] = predicates_[i]; + } + } + + /// Returns whether access is valid or not + CUTLASS_HOST_DEVICE + bool valid() { + + int pred_idx = + iteration_thread_ + + iteration_contiguous_ * ThreadMap::ThreadAccessShape::kStrided + + iteration_strided_ * ThreadMap::Iterations::kContiguous * ThreadMap::ThreadAccessShape::kStrided; + + int word_idx = pred_idx / kPredicatesPerWord; + int residual = pred_idx % kPredicatesPerWord; + int byte_idx = residual / kPredicatesPerByte; + int bit_idx = residual % kPredicatesPerByte; + + bool pred = (predicates_[word_idx] & (1u << (byte_idx * 8 + bit_idx))) != 0; + + return pred; + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileAccessIterator2dThreadTile for pitch-linear data. +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template +class PredicatedTileAccessIterator2dThreadTile { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::ColumnMajor; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + using AccessType = AccessType_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element *; + using NonConstPointer = typename platform::remove_const::type *; + + using UnderlyingIterator = PredicatedTileAccessIterator2dThreadTile< + layout::PitchLinearShape, Element, + layout::PitchLinear, (kAdvanceRank == 0 ? 
0 : 1), ThreadMap, AccessType>; + + /// Predicate vector stores mask to guard accesses + using Mask = typename UnderlyingIterator::Mask; + + /// Parameters object is precomputed state and is host-constructible + class Params { + private: + friend PredicatedTileAccessIterator2dThreadTile; + + /// Parameters object + typename UnderlyingIterator::Params params_; + + public: + + /// Default ctor + CUTLASS_HOST_DEVICE + Params() { } + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const &layout) + : params_(layout::PitchLinear(layout.stride(0))){} + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(typename UnderlyingIterator::Params::Base const &base) + : params_(base) {} + }; + + private: + // + // Data members + // + + /// Underlying pitch-linear tile iterator + UnderlyingIterator iterator_; + + public: + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator2dThreadTile( + ///< Precomputed parameters object + Params const ¶ms, + ///< Pointer to start of tensor + Pointer pointer, + ///< Extent of tensor + TensorCoord extent, + ///< ID of each participating thread + int thread_id, + ///< Initial offset of threadblock + TensorCoord const &threadblock_offset) + : iterator_(params.params_, pointer, + layout::PitchLinearCoord(extent.row(), extent.column()), + thread_id, + layout::PitchLinearCoord(threadblock_offset.row(), + threadblock_offset.column())) {} + + /// Construct a PredicatedTileAccessIterator2dThreadTile with zero threadblock offset + CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator2dThreadTile( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ) + : PredicatedTileAccessIterator2dThreadTile(params, pointer, extent, thread_id, + make_Coord(0, 0)) {} + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { iterator_.set_iteration_index(index); } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances an iterator along logical dimensions of matrix in units of whole + /// tiles + CUTLASS_HOST_DEVICE + void add_tile_offset(TensorCoord const &tile_offset) { + iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()}); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + return reinterpret_cast(iterator_.get()); + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator2dThreadTile &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. 
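+  //
+  // Illustrative note (added commentary, not upstream): this column-major wrapper only
+  // reorders coordinates for the pitch-linear iterator above -- the constructor passes
+  // extent and offsets as (row, column), e.g. layout::PitchLinearCoord(extent.row(),
+  // extent.column()) -- so predicate and pointer arithmetic stay in the pitch-linear
+  // specialization.
+  //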
+ CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator2dThreadTile operator++(int) { + PredicatedTileAccessIterator2dThreadTile self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { iterator_.enable_mask(); } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const &mask) { iterator_.set_mask(mask); } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask &mask) { iterator_.get_mask(mask); } + + /// Returns whether access is valid or not + CUTLASS_HOST_DEVICE + bool valid() { + return iterator_.valid(); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileAccessIterator2dThreadTile for pitch-linear data. +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template +class PredicatedTileAccessIterator2dThreadTile { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::RowMajor; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + using AccessType = AccessType_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element *; + using NonConstPointer = typename platform::remove_const::type *; + + using UnderlyingIterator = PredicatedTileAccessIterator2dThreadTile< + layout::PitchLinearShape, Element, + layout::PitchLinear, (kAdvanceRank == 0 ? 
1 : 0), ThreadMap, AccessType>; + + /// Predicate vector stores mask to guard accesses + using Mask = typename UnderlyingIterator::Mask; + + /// Parameters object is precomputed state and is host-constructible + class Params { + private: + friend PredicatedTileAccessIterator2dThreadTile; + + /// Parameters object + typename UnderlyingIterator::Params params_; + + public: + + /// Default ctor + CUTLASS_HOST_DEVICE + Params() { } + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const &layout) + : params_(layout::PitchLinear(layout.stride(0))){} + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(typename UnderlyingIterator::Params::Base const &base) + : params_(base) {} + }; + + private: + // + // Data members + // + + /// Underlying pitch-linear tile iterator + UnderlyingIterator iterator_; + + public: + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator2dThreadTile( + ///< Precomputed parameters object + Params const ¶ms, + ///< Pointer to start of tensor + Pointer pointer, + ///< Extent of tensor + TensorCoord extent, + ///< ID of each participating thread + int thread_id, + ///< Initial offset of threadblock + TensorCoord const &threadblock_offset) + : iterator_(params.params_, pointer, + layout::PitchLinearCoord(extent.column(), extent.row()), + thread_id, + layout::PitchLinearCoord(threadblock_offset.column(), + threadblock_offset.row())) {} + + /// Construct a PredicatedTileAccessIterator2dThreadTile with zero threadblock offset + CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator2dThreadTile( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ) + : PredicatedTileAccessIterator2dThreadTile(params, pointer, extent, thread_id, + make_Coord(0, 0)) {} + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { iterator_.set_iteration_index(index); } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances an iterator along logical dimensions of matrix in units of whole + /// tiles + CUTLASS_HOST_DEVICE + void add_tile_offset(TensorCoord const &tile_offset) { + iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()}); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + return reinterpret_cast(iterator_.get()); + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator2dThreadTile &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. 
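+  //
+  // Illustrative note (added commentary, not upstream): the row-major wrapper mirrors the
+  // column-major one but swaps coordinates, e.g. layout::PitchLinearCoord(extent.column(),
+  // extent.row()), and remaps the advance rank via (kAdvanceRank == 0 ? 1 : 0), since rows
+  // are the strided dimension of a row-major matrix.
+  //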
+ CUTLASS_HOST_DEVICE + PredicatedTileAccessIterator2dThreadTile operator++(int) { + PredicatedTileAccessIterator2dThreadTile self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { iterator_.enable_mask(); } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const &mask) { iterator_.set_mask(mask); } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask &mask) { iterator_.get_mask(mask); } + + /// Returns whether access is valid or not + CUTLASS_HOST_DEVICE + bool valid() { + return iterator_.valid(); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace transform +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/predicated_tile_access_iterator_params.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/predicated_tile_access_iterator_params.h new file mode 100644 index 0000000000000000000000000000000000000000..f284b0ae8d74e1c9443193151b15dfdce7573d06 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/predicated_tile_access_iterator_params.h @@ -0,0 +1,301 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/*! \file + \brief +*/ + +/* + Note: CUTLASS 3x increases the host compiler requirements to C++17. However, certain + existing integrations of CUTLASS require C++11 host compilers. + + Until this requirement can be lifted, certain headers with this annotation are required + to be remain consistent with C++11 syntax. + + C++11 compatibility is enforced by this unit test: `cutlass_test_unit_core_cpp11`. +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/array.h" +#include "cutlass/layout/matrix.h" +#include "cutlass/layout/pitch_linear.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace transform { +namespace threadblock { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Predicated tile access iterator descriptor object containing template dependent state +struct PredicatedTileAccessIteratorDesc { + + int element_size_bits; + int advance_rank; + layout::PitchLinearCoord threadblock_shape; + layout::PitchLinearCoord threadmap_iterations; + layout::PitchLinearCoord threadmap_delta; + + // + // Methods + // + + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorDesc() { } + + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorDesc( + int element_size_bits_, + int advance_rank_, + layout::PitchLinearCoord threadblock_shape_, + layout::PitchLinearCoord threadmap_iterations_, + layout::PitchLinearCoord threadmap_delta_ + ): + element_size_bits(element_size_bits_), + advance_rank(advance_rank_), + threadblock_shape(threadblock_shape_), + threadmap_iterations(threadmap_iterations_), + threadmap_delta(threadmap_delta_) + { + #if 0 + printf("PredicatedTileAccessIteratorDesc(%d, %d, {%d, %d}, {%d, %d}, {%d, %d}})\n", + element_size_bits, + advance_rank, + threadblock_shape.contiguous(), threadblock_shape.strided(), + threadmap_iterations.contiguous(), threadmap_iterations.strided(), + threadmap_delta.contiguous(), threadmap_delta.strided()); + #endif + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// +/// Helper template to construct an PredicatedTileAccessIteratorDesc from a template +// dependent state +template < + typename Shape, typename Element, typename Layout, + int AdvanceRank, typename ThreadMap> + struct MakePredicatedTileAccessIteratorDesc; +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileAccessIterator for pitch-linear data. +template < + typename Shape, typename Element, int AdvanceRank, + typename ThreadMap> +struct MakePredicatedTileAccessIteratorDesc < + Shape, Element, layout::PitchLinear, AdvanceRank, ThreadMap> { + + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorDesc operator()() { + + return PredicatedTileAccessIteratorDesc( + sizeof_bits::value, + AdvanceRank, + {Shape::kContiguous, Shape::kStrided}, + {ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided}, + {ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided} + ); +} + +}; +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileAccessIterator for column-major data. 
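+///
+/// Illustrative note (added commentary, not upstream): this and the remaining layout
+/// specializations below only remap Shape and AdvanceRank before delegating to the
+/// pitch-linear case, so a hypothetical query such as
+///
+///   MakePredicatedTileAccessIteratorDesc<Shape, Element, layout::ColumnMajor, 1, ThreadMap>()()
+///
+/// returns the descriptor computed by the pitch-linear specialization above.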
+template <
+    typename Shape, typename Element, int AdvanceRank,
+    typename ThreadMap>
+struct MakePredicatedTileAccessIteratorDesc <
+    Shape, Element, layout::ColumnMajor, AdvanceRank, ThreadMap> {
+
+  static int const kAdvanceRank = AdvanceRank;
+
+  using UnderlyingMakeOperator = MakePredicatedTileAccessIteratorDesc<
+      layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
+      layout::PitchLinear, (kAdvanceRank == 0 ? 0 : 1), ThreadMap>;
+
+  CUTLASS_HOST_DEVICE
+  PredicatedTileAccessIteratorDesc operator()() {
+
+    return UnderlyingMakeOperator()();
+  }
+};
+
+/////////////////////////////////////////////////////////////////////////////////////////////////
+
+/// Specialization of PredicatedTileAccessIterator for row-major data.
+template <
+    typename Shape, typename Element, int AdvanceRank,
+    typename ThreadMap>
+struct MakePredicatedTileAccessIteratorDesc <
+    Shape, Element, layout::RowMajor, AdvanceRank, ThreadMap> {
+
+  static int const kAdvanceRank = AdvanceRank;
+
+  using UnderlyingMakeOperator = MakePredicatedTileAccessIteratorDesc<
+      layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
+      layout::PitchLinear, (kAdvanceRank == 0 ? 1 : 0), ThreadMap>;
+
+  CUTLASS_HOST_DEVICE
+  PredicatedTileAccessIteratorDesc operator()() {
+
+    return UnderlyingMakeOperator()();
+  }
+};
+
+/////////////////////////////////////////////////////////////////////////////////////////////////
+
+/// Specialization of PredicatedTileAccessIterator for column-major interleaved data.
+template <
+    typename Shape, typename Element, int AdvanceRank,
+    typename ThreadMap, int InterleavedK>
+struct MakePredicatedTileAccessIteratorDesc <
+    Shape, Element, layout::ColumnMajorInterleaved<InterleavedK>, AdvanceRank, ThreadMap> {
+
+  static int const kAdvanceRank = AdvanceRank;
+  static int const kInterleavedK = InterleavedK;
+
+  using UnderlyingMakeOperator = MakePredicatedTileAccessIteratorDesc<
+      layout::PitchLinearShape<Shape::kRow * kInterleavedK, Shape::kColumn / kInterleavedK>, Element,
+      layout::PitchLinear, (kAdvanceRank == 0 ? 0 : 1), ThreadMap>;
+
+  CUTLASS_HOST_DEVICE
+  PredicatedTileAccessIteratorDesc operator()() {
+
+    return UnderlyingMakeOperator()();
+  }
+};
+
+/////////////////////////////////////////////////////////////////////////////////////////////////
+
+/// Specialization of PredicatedTileAccessIterator for row-major interleaved data.
+template <
+    typename Shape, typename Element, int AdvanceRank,
+    typename ThreadMap, int InterleavedK>
+struct MakePredicatedTileAccessIteratorDesc <
+    Shape, Element, layout::RowMajorInterleaved<InterleavedK>, AdvanceRank, ThreadMap> {
+
+  static int const kAdvanceRank = AdvanceRank;
+  static int const kInterleavedK = InterleavedK;
+
+  using UnderlyingMakeOperator = MakePredicatedTileAccessIteratorDesc<
+      layout::PitchLinearShape<Shape::kColumn * kInterleavedK, Shape::kRow / kInterleavedK>, Element,
+      layout::PitchLinear, (kAdvanceRank == 0 ?
1 : 0), ThreadMap>; + + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorDesc operator()() { + + return UnderlyingMakeOperator()(); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +// +// Parameters struct +// + +struct PredicatedTileAccessIteratorParams { + + using Index = int32_t; + using LongIndex = int64_t; + + // + // Data members + // + /// stride of pitch-linear layout (units of Element) + LongIndex stride_; + /// amount (in byte) to increment pointer to move to next access along + /// strided dimension + LongIndex inc_strided_; + /// amount (in byte) to increment pointer from last access to first access + /// of next tile + LongIndex inc_next_; + /// amount (in byte) to increment pointer from first access of current tile + /// to first access of next tile + LongIndex inc_advance_; + + // + // Methods + // + + CUTLASS_HOST_DEVICE + Status initialize(LongIndex stride, PredicatedTileAccessIteratorDesc desc) { + + stride_ = stride; + + inc_strided_ = (LongIndex(stride_) * desc.threadmap_delta.strided()) * + desc.element_size_bits / 8; + + if (desc.advance_rank) { + // advance along strided dimension + inc_advance_ = + desc.threadblock_shape.strided() * LongIndex(stride_) * desc.element_size_bits / 8; + } else { + // advance along contiguous dimension + inc_advance_ = desc.threadblock_shape.contiguous() * desc.element_size_bits / 8; + } + + inc_next_ = inc_advance_ - LongIndex(desc.threadmap_iterations.strided() - 1) * + desc.threadmap_delta.strided() * LongIndex(stride_) * + desc.element_size_bits / 8; + + return Status::kSuccess; + } + + CUTLASS_HOST_DEVICE + Status initialize(Index stride, PredicatedTileAccessIteratorDesc desc) { + return initialize(LongIndex(stride), desc); + } + + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorParams() { + initialize(LongIndex(0), PredicatedTileAccessIteratorDesc()); + } + + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorParams(Index stride, PredicatedTileAccessIteratorDesc desc) { + initialize(stride, desc); + } + + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorParams(LongIndex stride, PredicatedTileAccessIteratorDesc desc) { + initialize(stride, desc); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace transform +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/predicated_tile_access_iterator_triangular_matrix.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/predicated_tile_access_iterator_triangular_matrix.h new file mode 100644 index 0000000000000000000000000000000000000000..d304b99e142ce31b4a19531f28c70b462ef8edae --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/predicated_tile_access_iterator_triangular_matrix.h @@ -0,0 +1,892 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Templates calculating the address and predicates to the load of tiles + from pitch-linear rank=2 tensors. + + This iterator uses masks to guard out-of-bounds accesses and visits the last + "residue" tile first, with the objective of minimizing predicate mask updates + during steady-state operation. + + A precomputed "Params" object minimizes the amount of state that must be + stored in registers, and integer addition is used to advance the pointer + through memory. + + +*/ + +#pragma once + +#include "cutlass/blas3.h" +#include "cutlass/layout/matrix.h" +#include "cutlass/layout/pitch_linear.h" +#include "cutlass/matrix_shape.h" +#include "cutlass/predicate_vector.h" +#include "cutlass/tensor_ref.h" +#include "cutlass/tensor_view.h" + +//////////////////////////////////////////////////////////////////////////////// + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace transform { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +/// PredicatedTileAccessIteratorTriangularMatrix +/// +template +class PredicatedTileAccessIteratorTriangularMatrix; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileAccessIteratorTriangularMatrix for pitch-linear data. 
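
Before the pitch-linear specialization that follows, a brief aside on the guard-bit bookkeeping it relies on: predicates are packed four per byte, hence sixteen per 32-bit word, and each access index is decomposed into a word, byte, and bit position. The standalone sketch below mirrors that decomposition only; the helper names set_guard/get_guard and the values in main() are illustrative and do not appear in CUTLASS.

    // Hypothetical standalone sketch of the predicate packing scheme.
    #include <cassert>
    #include <cstdint>

    constexpr int kPredicatesPerByte = 4;
    constexpr int kPredicatesPerWord = 4 * kPredicatesPerByte;   // 16

    // Record the guard for one access index (mirrors compute_predicates_).
    void set_guard(uint32_t *predicates, int pred_idx, bool guard) {
      int word_idx = pred_idx / kPredicatesPerWord;
      int residual = pred_idx % kPredicatesPerWord;
      int byte_idx = residual / kPredicatesPerByte;
      int bit_idx  = residual % kPredicatesPerByte;
      predicates[word_idx] |= (unsigned(guard) << (byte_idx * 8 + bit_idx));
    }

    // Read the guard back (mirrors valid()).
    bool get_guard(uint32_t const *predicates, int pred_idx) {
      int word_idx = pred_idx / kPredicatesPerWord;
      int residual = pred_idx % kPredicatesPerWord;
      int byte_idx = residual / kPredicatesPerByte;
      int bit_idx  = residual % kPredicatesPerByte;
      return (predicates[word_idx] & (1u << (byte_idx * 8 + bit_idx))) != 0;
    }

    int main() {
      uint32_t predicates[4] = {0u, 0u, 0u, 0u};
      set_guard(predicates, 21, true);   // word 1, byte 1, bit 1 -> bit 9
      assert(get_guard(predicates, 21));
      assert(!get_guard(predicates, 20));
      return 0;
    }
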
+/// +template +class PredicatedTileAccessIteratorTriangularMatrix { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::PitchLinear; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + using AccessType = AccessType_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + using StrideIndex = typename Layout::Stride::Index; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element *; + using NonConstPointer = typename platform::remove_const::type *; + + static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements; + + using CompareOp = typename TrMatrixCompareOp::Type; + + static_assert( kFillMode == FillMode::kFull || + ((kFillMode == FillMode::kLower || kFillMode == FillMode::kUpper) && AccessType::kElements == 1), + "BLAS3 iterator for the triangular/symmetric matrix must use AccessType::kElements as 1"); + + static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements), + "Vectors implied by the thread map must be divisible by the access type."); + + static int const kPredicatesPerByte = 4; + static int const kPredicatesPerWord = 4 * kPredicatesPerByte; + + static int const kPredicateCount = ThreadMap::Iterations::kCount * kAccessesPerVector; + + /// Number of 32b words containing predicates + static int const kPredicateByteCount = + (kPredicateCount + kPredicatesPerByte - 1) / kPredicatesPerByte; + static int const kPredicateWordCount = (kPredicateByteCount + 3) / 4; + + static unsigned const kPredicateMask = (1u << kPredicatesPerByte) - 1u; + + static_assert(kPredicateWordCount <= 4, "Too many predicates."); + + /// Predicate vector stores mask to guard accesses + using Mask = Array; + + /// Parameters object is precomputed state and is host-constructible + class Params { + public: + friend PredicatedTileAccessIteratorTriangularMatrix; + + private: + /// stride of pitch-linear layout (units of Element) + StrideIndex stride_; + /// (true) pitch-linear layout is mapped to row-major matrix + /// (false) pitch-linear layout is mapped to column-major matrix + bool is_row_major_; + /// for vectorized access across the diagonal boundary guard condition is + /// checked for the element on the boundary + int access_diagonal_boundary_; + /// amount (in byte) to increment pointer to move to next access along + /// strided dimension + LongIndex inc_strided_; + /// amount (in byte) to increment pointer from last access to first access + /// of next tile + LongIndex inc_next_; + /// amount (in byte) to increment pointer from first access of current tile + /// to first access of next tile + LongIndex inc_advance_; + + public: + + // Default ctor + CUTLASS_HOST_DEVICE + Params(): stride_(0), inc_strided_(0), inc_next_(0), inc_advance_(0), is_row_major_(false), access_diagonal_boundary_(0) { } + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const &layout, bool is_row_major, int access_diagonal_boundary) : + stride_(layout.stride(0)), is_row_major_(is_row_major), access_diagonal_boundary_(access_diagonal_boundary) { + + inc_strided_ = (LongIndex(stride_) * ThreadMap::Delta::kStrided) * + sizeof_bits::value / 8; + + if 
(kAdvanceRank) { + // advance along strided dimension + inc_advance_ = + Shape::kStrided * LongIndex(stride_) * sizeof_bits::value / 8; + } else { + // advance along contiguous dimension + inc_advance_ = Shape::kContiguous * sizeof_bits::value / 8; + } + + inc_next_ = inc_advance_ - LongIndex(ThreadMap::Iterations::kStrided - 1) * + ThreadMap::Delta::kStrided * LongIndex(stride_) * + sizeof_bits::value / 8; + + }; + + + }; + + private: + /// Internal pointer type permits fast address arithmetic + using BytePointer = char *; + + private: + // + // Data members + // + + /// Parameters object with precomputed internal state + Params const ¶ms_; + + /// Internal pointer to first access of tile + BytePointer pointer_; + + /// Guard predicates + uint32_t predicates_[kPredicateWordCount]; + + /// Track global memory addresses on the diagonal + /// To ignore imag part for diagonal elements of hermitian matrices + uint32_t predicates_onDiag_[kPredicateWordCount]; + + /// Size of tensor + TensorCoord extent_; + + /// Initial offset for each thread + TensorCoord thread_offset_; + + /// Iteration along vectors implied by the thread map + int iteration_vector_; + + /// Iteration in the contiguous dimension + int iteration_contiguous_; + + /// Iteration in the strided dimension + int iteration_strided_; + + private: + /// Computes predicates based on internally tracked per-thread offset. + CUTLASS_DEVICE + void compute_predicates_( + /// Extent of the matrix window + TensorCoord extent) { + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kPredicateWordCount; ++i) { + predicates_[i] = 0u; + predicates_onDiag_[i] = 0u; + } + + CompareOp compare_op; + + CUTLASS_PRAGMA_UNROLL + for (int access_idx = 0; access_idx < ThreadMap::Iterations::kCount * kAccessesPerVector; ++access_idx) { + + int s = access_idx / (ThreadMap::Iterations::kContiguous * kAccessesPerVector); + + int access_residual = access_idx % (ThreadMap::Iterations::kContiguous * kAccessesPerVector); + + int c = access_residual / kAccessesPerVector; + int v = access_residual % kAccessesPerVector; + + TensorCoord iteration_coord(c * ThreadMap::Delta::kContiguous + v * AccessType::kElements, + s * ThreadMap::Delta::kStrided); + + TensorCoord coord = thread_offset_ + iteration_coord; + + bool guard; + bool onDiag = false; + + guard = ((coord.strided() < extent.strided()) && + (coord.contiguous() < extent.contiguous())); + + + // guard access on the wrong side of the triagular matrix diagonal + if (kFillMode == FillMode::kLower || kFillMode == FillMode::kUpper) { + coord += TensorCoord{params_.access_diagonal_boundary_, 0}; + + bool triagular_guard_row_major = compare_op(coord.strided(), coord.contiguous()) | !params_.is_row_major_; + bool triagular_guard_col_major = compare_op(coord.contiguous(), coord.strided()) | params_.is_row_major_; + + guard = guard && triagular_guard_row_major && triagular_guard_col_major; + + if (kDiagType == DiagType::kUnit) { + onDiag = (guard && coord.strided() == coord.contiguous()) ? 
true : false; + } + } + + int pred_idx_onDiag = v + kAccessesPerVector * (c + ThreadMap::Iterations::kContiguous * s); + int word_idx_onDiag = pred_idx_onDiag / kPredicatesPerWord; + int residual_onDiag = pred_idx_onDiag % kPredicatesPerWord; + int byte_idx_onDiag = residual_onDiag / kPredicatesPerByte; + int bit_idx_onDiag = residual_onDiag % kPredicatesPerByte; + + predicates_onDiag_[word_idx_onDiag] |= (unsigned(onDiag) << (byte_idx_onDiag * 8 + bit_idx_onDiag)); + + int pred_idx = v + kAccessesPerVector * (c + ThreadMap::Iterations::kContiguous * s); + + int word_idx = pred_idx / kPredicatesPerWord; + int residual = pred_idx % kPredicatesPerWord; + int byte_idx = residual / kPredicatesPerByte; + int bit_idx = residual % kPredicatesPerByte; + + predicates_[word_idx] |= (unsigned(guard) << (byte_idx * 8 + bit_idx)); + + } + + } + + public: + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorTriangularMatrix( + /// Precomputed parameters object + Params const ¶ms, + /// Pointer to start of tensor + Pointer pointer, + /// Extent of tensor + TensorCoord extent, + /// ID of each participating thread + int thread_id, + /// Initial offset of threadblock + TensorCoord const &threadblock_offset) + : params_(params), + pointer_(reinterpret_cast(const_cast(pointer))), + extent_(extent) { + + + // Per-thread offset in logical coordinates of tensor + thread_offset_ = threadblock_offset + ThreadMap::initial_offset(thread_id); + + // update internal pointers + Layout layout(params_.stride_); + add_pointer_offset(layout(thread_offset_)); + + compute_predicates_(extent_); + + set_iteration_index(0); + } + + /// Construct a PredicatedTileAccessIteratorTriangularMatrix with zero threadblock offset + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorTriangularMatrix( + /// Precomputed parameters object + Params const ¶ms, + /// Pointer to start of tensor + Pointer pointer, + /// Extent of tensor + TensorCoord extent, + ///< ID of each participating thread + int thread_id) + : PredicatedTileAccessIteratorTriangularMatrix(params, pointer, extent, thread_id, + make_Coord(0, 0)) {} + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { + + iteration_vector_ = index % kAccessesPerVector; + int residual_access = index / kAccessesPerVector; + + iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous; + iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous; + + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + pointer_ += sizeof_bits::value * pointer_offset / 8; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &tile_offset) { + + if (kAdvanceRank) { + pointer_ += params_.inc_advance_ * LongIndex(tile_offset.strided()); + pointer_ += Shape::kContiguous * tile_offset.contiguous(); + thread_offset_ += TensorCoord{0, Shape::kStrided * tile_offset.strided()}; + } else { + pointer_ += params_.inc_advance_ * LongIndex(tile_offset.contiguous()); + pointer_ += Shape::kStrided * tile_offset.strided(); + thread_offset_ += TensorCoord{Shape::kContiguous * tile_offset.contiguous(), 0}; + } + + compute_predicates_(extent_); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + return reinterpret_cast( + pointer_ + + 
iteration_contiguous_ * (ThreadMap::Delta::kContiguous * sizeof_bits::value) / 8) + iteration_vector_; + } + + /// Increment and return an instance to self. + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorTriangularMatrix &operator++() { + + ++iteration_vector_; + if (iteration_vector_ < kAccessesPerVector) { + return *this; + } + + iteration_vector_ = 0; + ++iteration_contiguous_; + + if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) { + return *this; + } + + // Enter here only if (iteration_contiguous_ == + // ThreadMap::Iteration::kContiguous) + iteration_contiguous_ = 0; + ++iteration_strided_; + + if (iteration_strided_ < ThreadMap::Iterations::kStrided) { + pointer_ += params_.inc_strided_; + return *this; + } + + // Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided) + // which means we enter the next tile. + iteration_strided_ = 0; + + // advance to next tile + pointer_ += params_.inc_next_; + + // now return to start tile - if the iterator is subsequently advanced, this + // subtraction as well as the subsequent integer addition are both elided by + // the compiler. + pointer_ -= params_.inc_advance_; + + return *this; + } + + /// Increment and return an instance to self. + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorTriangularMatrix operator++(int) { + PredicatedTileAccessIteratorTriangularMatrix self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kPredicateWordCount; ++i) { + predicates_[i] = enable ? 0u : predicates_[i]; + } + + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kPredicateWordCount; ++i) { + predicates_[i] = 0xffffffff; + } + } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const &mask) { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kPredicateWordCount; ++i) { + predicates_[i] = mask[i]; + } + + } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask &mask) { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kPredicateWordCount; ++i) { + mask[i] = predicates_[i]; + } + } + + /// Return if the address in on the diagonal + CUTLASS_HOST_DEVICE + bool getOnDiag() { + int pred_idx = + iteration_vector_ + kAccessesPerVector * (iteration_contiguous_ + iteration_strided_ * ThreadMap::Iterations::kContiguous); + + int word_idx = pred_idx / kPredicatesPerWord; + int residual = pred_idx % kPredicatesPerWord; + int byte_idx = residual / kPredicatesPerByte; + int bit_idx = residual % kPredicatesPerByte; + + bool pred = (predicates_onDiag_[word_idx] & (1u << (byte_idx * 8 + bit_idx))) != 0; + return pred; + } + + /// Returns whether access is valid or not + CUTLASS_HOST_DEVICE + bool valid() { + + + int pred_idx = + iteration_vector_ + kAccessesPerVector * (iteration_contiguous_ + iteration_strided_ * ThreadMap::Iterations::kContiguous); + + int word_idx = pred_idx / kPredicatesPerWord; + int residual = pred_idx % kPredicatesPerWord; + int byte_idx = residual / kPredicatesPerByte; + int bit_idx = residual % kPredicatesPerByte; + + bool pred = (predicates_[word_idx] & (1u << (byte_idx * 8 + bit_idx))) != 0; + return pred; + + + //return true; + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileAccessIteratorTriangularMatrix for column-major 
data. +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template +class PredicatedTileAccessIteratorTriangularMatrix { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::ColumnMajor; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + using AccessType = AccessType_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element *; + using NonConstPointer = typename platform::remove_const::type *; + + using UnderlyingIterator = PredicatedTileAccessIteratorTriangularMatrix< + layout::PitchLinearShape, Element, + layout::PitchLinear, (kAdvanceRank == 0 ? 0 : 1), ThreadMap, + kSideMode, kFillMode, kDiagType, AccessType>; + + /// Predicate vector stores mask to guard accesses + using Mask = typename UnderlyingIterator::Mask; + + static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector; + + static int const kAccessDiagonalBoundary = + (kFillMode == FillMode::kLower) ? (AccessType::kElements - 1) : 0; + + /// Parameters object is precomputed state and is host-constructible + class Params { + private: + friend PredicatedTileAccessIteratorTriangularMatrix; + + /// Parameters object + typename UnderlyingIterator::Params params_; + + public: + + /// Default ctor + CUTLASS_HOST_DEVICE + Params() { } + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const &layout) + : params_(layout::PitchLinear(layout.stride(0)), false, kAccessDiagonalBoundary){}; + }; + + private: + // + // Data members + // + + /// Underlying pitch-linear tile iterator + UnderlyingIterator iterator_; + + public: + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorTriangularMatrix( + ///< Precomputed parameters object + Params const ¶ms, + ///< Pointer to start of tensor + Pointer pointer, + ///< Extent of tensor + TensorCoord extent, + ///< ID of each participating thread + int thread_id, + ///< Initial offset of threadblock + TensorCoord const &threadblock_offset) + : iterator_(params.params_, pointer, + layout::PitchLinearCoord(extent.row(), extent.column()), + thread_id, + layout::PitchLinearCoord(threadblock_offset.row(), + threadblock_offset.column())) {} + + /// Construct a PredicatedTileAccessIteratorTriangularMatrix with zero threadblock offset + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorTriangularMatrix( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ) + : PredicatedTileAccessIteratorTriangularMatrix(params, pointer, extent, thread_id, + make_Coord(0, 0)) {} + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { iterator_.set_iteration_index(index); } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + 
iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances an iterator along logical dimensions of matrix in units of whole + /// tiles + CUTLASS_HOST_DEVICE + void add_tile_offset(TensorCoord const &tile_offset) { + iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()}); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + return reinterpret_cast(iterator_.get()); + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorTriangularMatrix &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorTriangularMatrix operator++(int) { + PredicatedTileAccessIteratorTriangularMatrix self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { iterator_.enable_mask(); } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const &mask) { iterator_.set_mask(mask); } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask &mask) { iterator_.get_mask(mask); } + + /// Return if the address in on the diagonal + CUTLASS_HOST_DEVICE + bool getOnDiag() { + return iterator_.getOnDiag(); + } + + /// Returns whether access is valid or not + CUTLASS_HOST_DEVICE + bool valid() { + return iterator_.valid(); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileAccessIteratorTriangularMatrix for row-major data. +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template +class PredicatedTileAccessIteratorTriangularMatrix { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::RowMajor; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + using AccessType = AccessType_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element *; + using NonConstPointer = typename platform::remove_const::type *; + + using UnderlyingIterator = PredicatedTileAccessIteratorTriangularMatrix< + layout::PitchLinearShape, Element, + layout::PitchLinear, (kAdvanceRank == 0 ? 
1 : 0), ThreadMap, + kSideMode, kFillMode, kDiagType, AccessType>; + + static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector; + + static int const kAccessDiagonalBoundary = + (kFillMode == FillMode::kUpper) ? (AccessType::kElements - 1) : 0; + + /// Predicate vector stores mask to guard accesses + using Mask = typename UnderlyingIterator::Mask; + + /// Parameters object is precomputed state and is host-constructible + class Params { + private: + friend PredicatedTileAccessIteratorTriangularMatrix; + + /// Parameters object + typename UnderlyingIterator::Params params_; + + public: + + /// Default ctor + CUTLASS_HOST_DEVICE + Params() { } + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const &layout) + : params_(layout::PitchLinear(layout.stride(0)), true, kAccessDiagonalBoundary){}; + }; + + private: + // + // Data members + // + + /// Underlying pitch-linear tile iterator + UnderlyingIterator iterator_; + + public: + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorTriangularMatrix( + ///< Precomputed parameters object + Params const ¶ms, + ///< Pointer to start of tensor + Pointer pointer, + ///< Extent of tensor + TensorCoord extent, + ///< ID of each participating thread + int thread_id, + ///< Initial offset of threadblock + TensorCoord const &threadblock_offset) + : iterator_(params.params_, pointer, + layout::PitchLinearCoord(extent.column(), extent.row()), + thread_id, + layout::PitchLinearCoord(threadblock_offset.column(), + threadblock_offset.row())) {} + + /// Construct a PredicatedTileAccessIteratorTriangularMatrix with zero threadblock offset + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorTriangularMatrix( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ) + : PredicatedTileAccessIteratorTriangularMatrix(params, pointer, extent, thread_id, + make_Coord(0, 0)) {} + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { iterator_.set_iteration_index(index); } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances an iterator along logical dimensions of matrix in units of whole + /// tiles + CUTLASS_HOST_DEVICE + void add_tile_offset(TensorCoord const &tile_offset) { + iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()}); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + return reinterpret_cast(iterator_.get()); + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorTriangularMatrix &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. 
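
As an aside on the fill-mode masking that both the column-major and row-major adapters inherit from the pitch-linear iterator above: an access survives predication only when it lies inside the extent and on the stored side of the diagonal. The sketch below is a simplified host-side illustration in logical (row, column) coordinates; the function name is hypothetical, and the comparison is fixed for a lower-triangular matrix, whereas the iterator derives it from TrMatrixCompareOp for the given FillMode and DiagType.

    // Simplified, standalone illustration of the two-part triangular guard.
    #include <cassert>

    bool lower_triangular_guard(int row, int col, int rows, int cols) {
      bool in_bounds      = (row < rows) && (col < cols);
      bool on_stored_side = (col <= row);   // lower fill keeps col <= row
      return in_bounds && on_stored_side;
    }

    int main() {
      assert(lower_triangular_guard(3, 1, 8, 8));    // below the diagonal: kept
      assert(lower_triangular_guard(3, 3, 8, 8));    // on the diagonal: kept
      assert(!lower_triangular_guard(1, 3, 8, 8));   // above the diagonal: masked
      assert(!lower_triangular_guard(9, 0, 8, 8));   // out of bounds: masked
      return 0;
    }
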
+ /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorTriangularMatrix operator++(int) { + PredicatedTileAccessIteratorTriangularMatrix self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { iterator_.enable_mask(); } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const &mask) { iterator_.set_mask(mask); } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask &mask) { iterator_.get_mask(mask); } + + /// Return if the address in on the diagonal + CUTLASS_HOST_DEVICE + bool getOnDiag() { + return iterator_.getOnDiag(); + } + + /// Returns whether access is valid or not + CUTLASS_HOST_DEVICE + bool valid() { + return iterator_.valid(); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace transform +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/predicated_tile_iterator.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/predicated_tile_iterator.h new file mode 100644 index 0000000000000000000000000000000000000000..ada679fd09e2d1cda38381789306039fefed9829 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/predicated_tile_iterator.h @@ -0,0 +1,1887 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Templates implementing loading of tiles from pitch-linear rank=2 tensors. + + This iterator uses masks to guard out-of-bounds accesses. The first tile this + iterator visits maybe partial, then the remaining tiles are complete. So, we + only need to compute the predicates twice, once before the first tile and + once for the remaining full tiles which can share the same predicates. + + A precomputed "Params" object minimizes the amount of state that must be stored in registers, + and integer addition is used to advance the pointer through memory. +*/ + +#pragma once + +#include "cutlass/arch/memory.h" +#include "cutlass/transform/threadblock/predicated_tile_access_iterator.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace transform { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +/// PredicatedTileIterator +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +/// Regular tile iterator using a precomputed control structure to minimize register liveness +/// and integer arithmetic. +/// +/// Layout is assumed to be invariant at the time the precomputed "Params" object is constructed. +/// +/// Base pointer and tensor extents may be specified at the time the iterator is constructed. +/// Subsequently, they are assumed to be immutable. +/// +/// Adding a logical coordinate offset may be performed at the time the iterator is constructed. +/// Subsequent additions to logical coordinate offset may be performed but are relatively expensive. +/// +/// Visitation order is intended to first visit a "residual" tile that may be partially full in +/// both the advance dimension and the steady-state dimension. This is assumed to be the last +/// tile in the iteration sequence. Advancing an iterator that has just been constructed moves to +/// the first tile that is full in the advance dimension and recomputes predicates. Subsequent +/// accesses may be performed without updating internal predicates and are efficient in terms of +/// live register state and pointer arithmetic instructions. +/// +/// To be efficient, this assumes the iterator will be dereferenced and advanced at least once +/// outside any looping structure to minimize integer arithmetic. +/// +/// Acceses out of bounds are safe so long as `clear_mask()` is called prior to dereferencing +/// the iterator. 
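
To make the "precomputed Params plus integer addition" idea above concrete before the kernel example, the sketch below evaluates the same increment formulas used by the access-iterator Params::initialize() earlier in this patch for one hypothetical configuration; the tile shape, stride, element width, and thread-map numbers are made up for illustration.

    // Hypothetical numeric sketch of the precomputed pointer increments.
    #include <cstdint>
    #include <cstdio>

    int main() {
      int64_t stride          = 1024;  // leading dimension, in elements
      int     element_bits    = 16;
      int     tile_contiguous = 128;   // Shape::kContiguous
      int     tile_strided    = 8;     // Shape::kStrided
      int     iters_strided   = 2;     // ThreadMap::Iterations::kStrided
      int     delta_strided   = 4;     // ThreadMap::Delta::kStrided
      bool    advance_strided = true;  // kAdvanceRank == 1

      // Bytes between two successive strided accesses made by one thread.
      int64_t inc_strided = stride * delta_strided * element_bits / 8;

      // Bytes from the first access of one tile to the first access of the next.
      int64_t inc_advance = advance_strided
          ? tile_strided * stride * element_bits / 8
          : int64_t(tile_contiguous) * element_bits / 8;

      // Bytes from the last access of the current tile to the first access of
      // the next tile: the advance, minus what the strided iterations covered.
      int64_t inc_next = inc_advance -
          int64_t(iters_strided - 1) * delta_strided * stride * element_bits / 8;

      std::printf("inc_strided=%lld inc_advance=%lld inc_next=%lld\n",
                  (long long)inc_strided, (long long)inc_advance,
                  (long long)inc_next);   // 8192, 16384, 8192
      return 0;
    }
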
+/// +/// +/// Example: +/// +/// An efficient pipeline structure may be constructed as follows: +/// +// template +// __global__ void kernel( +// typename Iterator::Params params, +// typename Iterator::Element *ptr, +// TensorCoord extent) { +// +// typename Iterator::Fragment fragment; +// +// TensorCoord threadblock_offset(0, 0); +// +// Iterator iter(params, ptr, extent, threadIdx.x, threadblock_offsets); +// +// +// fragment = *iter; // load "residue" tile first +// ++iter; // advance to first "steady state" tile and update internal masks +// +// +// #pragma unroll +// for (int i = Remaining - 1; i >= 0; --i) { +// +// f(fragment); +// +// if (!i) { +// iter.clear_mask(); // light-weight operation to clear masks - subsequent loads become NO-OPs. +// } +// +// fragment = *iter; // load tile during "steady state" phase +// ++iter; // advance to next tile - lightweight due to steady-state masks +// } +// } +// +// void host(TensorView view) { +// +// using Iterator = transform::threadblock::PredicatedTileIterator; +// +// typename Iterator::Params params(view.layout()); +// +// kernel(params, view.data()); +// } +/// +/// +template < + typename Shape, + typename Element, + typename Layout, + int AdvanceRank, + typename ThreadMap, + int AccessSize = ThreadMap::kElementsPerAccess, + bool Gather = false, + typename PermuteLayout = layout::NoPermute +> +class PredicatedTileIterator; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileIterator for pitch-linear data. +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template +class PredicatedTileIterator { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::PitchLinear; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element *; + using NonConstPointer = typename platform::remove_const::type *; + + /// Type used for internal memory accesses + using AccessType = AlignedArray::value / 8)>; + + /// Underlying iterator to compute the addresses + using TileAccessIterator = + PredicatedTileAccessIterator; + + static int const kAccessesPerVector = TileAccessIterator::kAccessesPerVector; + + /// Fragment object to be loaded or stored + using Fragment = cutlass::Array; + + /// Predicate vector stores mask to guard accesses + using Mask = typename TileAccessIterator::Mask; + + /// Parameters object is precomputed state and is host-constructible + class Params { + public: + using Base = typename TileAccessIterator::Params::Base; + + friend PredicatedTileIterator; + + private: + /// Parameters object + typename TileAccessIterator::Params params_; + + public: + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const &layout) : params_(layout) {} + + /// Default constructor + Params() = default; + + CUTLASS_HOST_DEVICE + Params(Base const &base) + : params_(base) {} + }; + + private: + /// Internal pointer type permits fast address 
arithmetic + using BytePointer = char *; + + private: + // + // Data members + // + + /// Data member to the tile access iterator + TileAccessIterator address_iterator_; + + public: + + /// Default constructor + PredicatedTileIterator() = default; + + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileIterator( + /// Precomputed parameters object + Params const ¶ms, + /// Pointer to start of tensor + Pointer pointer, + /// Extent of tensor + TensorCoord extent, + /// ID of each participating thread + int thread_id, + /// Initial offset of threadblock + TensorCoord const &threadblock_offset, + /// Gather indices + int const *indices = nullptr) + : address_iterator_(params.params_, pointer, extent, thread_id, + threadblock_offset, indices) {} + + /// Construct a PredicatedTileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + PredicatedTileIterator( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ) + : PredicatedTileIterator(params, pointer, extent, thread_id, + make_Coord(0, 0)) {} + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + address_iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileIterator &operator++() { + if (kAdvanceRank) + address_iterator_.add_tile_offset({0, 1}); + else + address_iterator_.add_tile_offset({1, 0}); + + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. 
+ CUTLASS_HOST_DEVICE + PredicatedTileIterator operator++(int) { + PredicatedTileIterator self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { address_iterator_.clear_mask(enable); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { address_iterator_.enable_mask(); } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const &mask) { address_iterator_.set_mask(mask); } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask &mask) { address_iterator_.get_mask(mask); } + + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + load_with_byte_offset(frag, pointer_offset * sizeof_bits::value / 8); + } + + CUTLASS_DEVICE + void load_with_byte_offset(Fragment &frag, LongIndex byte_offset) { + + AccessType *frag_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < kAccessesPerVector; ++v) { + + int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous); + + address_iterator_.set_iteration_index(idx); + char const *byte_ptr = reinterpret_cast(address_iterator_.get()) + byte_offset; + + AccessType const *access_ptr = reinterpret_cast(byte_ptr); + + cutlass::arch::global_load( + frag_ptr[idx], access_ptr, address_iterator_.valid()); + + ++address_iterator_; + } + } + } + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment &frag) { load_with_byte_offset(frag, 0); } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { + store_with_byte_offset(frag, pointer_offset * sizeof_bits::value / 8); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_byte_offset(Fragment const &frag, LongIndex byte_offset) { + address_iterator_.set_iteration_index(0); + AccessType const *frag_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < kAccessesPerVector; ++v) { + + int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous); + + char *byte_ptr = reinterpret_cast(address_iterator_.get()) + byte_offset; + AccessType *access_ptr = reinterpret_cast(byte_ptr); + + if (address_iterator_.valid()) { + *access_ptr = frag_ptr[idx]; + } + ++address_iterator_; + } + } + } + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const &frag) { store_with_byte_offset(frag, 0); } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileIterator for column-major data. 
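
The load_with_byte_offset loop above issues one access per (strided, contiguous, vector) index and passes address_iterator_.valid() as the guard to cutlass::arch::global_load, so masked accesses never touch memory. The host-side analog below captures only that predication pattern; predicated_load and its test values are hypothetical, and masked fragment elements are simply left at their initial value in this sketch.

    // Host-side analog of a predicated fragment load.
    #include <array>
    #include <cassert>

    template <typename T, int N>
    void predicated_load(std::array<T, N> &frag, T const *src, bool const *valid) {
      for (int i = 0; i < N; ++i) {
        if (valid[i]) {     // guard the access, as valid() does per access index
          frag[i] = src[i];
        }
      }
    }

    int main() {
      int  src[4]   = {10, 20, 30, 40};
      bool valid[4] = {true, true, false, true};   // third access is masked off
      std::array<int, 4> frag = {0, 0, 0, 0};
      predicated_load<int, 4>(frag, src, valid);
      assert(frag[0] == 10 && frag[1] == 20 && frag[2] == 0 && frag[3] == 40);
      return 0;
    }
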
+/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template < + typename Shape_, + typename Element_, + int AdvanceRank, + typename ThreadMap_, + int AccessSize, + bool Gather, + typename PermuteLayout +> +class PredicatedTileIterator { +public: + + static_assert(AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::ColumnMajor; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element *; + using NonConstPointer = typename platform::remove_const::type *; + + using UnderlyingIterator = PredicatedTileIterator< + layout::PitchLinearShape, + Element, + layout::PitchLinear, + (kAdvanceRank == 0 ? 0 : 1), + ThreadMap, + AccessSize, + Gather, + PermuteLayout + >; + + using AccessType = typename UnderlyingIterator::AccessType; + + /// Fragment object to be loaded or stored + using Fragment = cutlass::Array; + + /// Predicate vector stores mask to guard accesses + using Mask = typename UnderlyingIterator::Mask; + + /// Parameters object is precomputed state and is host-constructible + class Params { + private: + + friend PredicatedTileIterator; + + /// Parameters object + typename UnderlyingIterator::Params params_; + + public: + + /// Default constructor + Params() = default; + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const &layout): params_(layout::PitchLinear(layout.stride(0))) + {} + + CUTLASS_HOST_DEVICE + Params(typename UnderlyingIterator::Params::Base const &base) + : params_(base) {} + }; + + +private: + + // + // Data members + // + + /// Underlying pitch-linear tile iterator + UnderlyingIterator iterator_; + +public: + + /// Default constructor + PredicatedTileIterator() = default; + + /// Constructs a TileIterator from its precomputed state, threadblock offset, and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileIterator( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id, ///< ID of each participating thread + TensorCoord const &threadblock_offset, ///< Initial offset of threadblock + int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization + ): + iterator_( + params.params_, + pointer, + layout::PitchLinearCoord(extent.row(), extent.column()), + thread_id, + layout::PitchLinearCoord(threadblock_offset.row(), threadblock_offset.column()), + indices) + { } + + /// Construct a PredicatedTileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + PredicatedTileIterator( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ): PredicatedTileIterator(params, pointer, extent, thread_id, make_Coord(0, 0)) { } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + 
iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the iterator's + /// internal pointer is reverted to the first "steady state" tile. Subsequent calls + /// are lightweight and must only update the internal pointer. + CUTLASS_HOST_DEVICE + PredicatedTileIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the iterator's + /// internal pointer is reverted to the first "steady state" tile. Subsequent calls + /// are lightweight and must only update the internal pointer. + CUTLASS_HOST_DEVICE + PredicatedTileIterator operator++(int) { + PredicatedTileIterator self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { + iterator_.clear_mask(enable); + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { + iterator_.enable_mask(); + } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const &mask) { + iterator_.set_mask(mask); + } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask &mask) { + iterator_.get_mask(mask); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_byte_offset(Fragment &frag, LongIndex byte_offset) { + iterator_.load_with_byte_offset(frag, byte_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment &frag) { + load_with_pointer_offset(frag, 0); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { + iterator_.store_with_pointer_offset(frag, pointer_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_byte_offset(Fragment const &frag, LongIndex byte_offset) { + iterator_.store_with_byte_offset(frag, byte_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const &frag) { + store_with_pointer_offset(frag, 0); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileIterator for row-major data. 
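
The column-major adapter above and the row-major adapter that follows use the same trick: neither re-implements predication; each simply reorders the logical (row, column) pair into the pitch-linear (contiguous, strided) pair, flips the advance rank, and delegates everything else to the pitch-linear iterator. A minimal standalone sketch of that coordinate mapping, using a local stand-in type rather than the CUTLASS layout::PitchLinearCoord:

    // Coordinate reordering used by the layout adapters (stand-in type).
    #include <cassert>

    struct PLCoord { int contiguous; int strided; };

    // Column-major: rows are contiguous in memory.
    PLCoord from_column_major(int row, int column) { return {row, column}; }

    // Row-major: columns are contiguous in memory.
    PLCoord from_row_major(int row, int column) { return {column, row}; }

    int main() {
      PLCoord a = from_column_major(/*row=*/3, /*column=*/5);
      PLCoord b = from_row_major(/*row=*/3, /*column=*/5);
      assert(a.contiguous == 3 && a.strided == 5);
      assert(b.contiguous == 5 && b.strided == 3);
      return 0;
    }
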
+/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template < + typename Shape_, + typename Element_, + int AdvanceRank, + typename ThreadMap_, + int AccessSize, + bool Gather, + typename PermuteLayout +> +class PredicatedTileIterator { +public: + + static_assert(AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::RowMajor; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element *; + using NonConstPointer = typename platform::remove_const::type *; + + using UnderlyingIterator = PredicatedTileIterator< + layout::PitchLinearShape, + Element, + layout::PitchLinear, + (kAdvanceRank == 0 ? 1 : 0), + ThreadMap, + AccessSize, + Gather, + PermuteLayout + >; + + using AccessType = typename UnderlyingIterator::AccessType; + + /// Fragment object to be loaded or stored + using Fragment = cutlass::Array; + + /// Predicate vector stores mask to guard accesses + using Mask = typename UnderlyingIterator::Mask; + + /// Parameters object is precomputed state and is host-constructible + class Params { + private: + + friend PredicatedTileIterator; + + /// Parameters object + typename UnderlyingIterator::Params params_; + + public: + + /// Default constructor + Params() = default; + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const &layout): params_(layout::PitchLinear(layout.stride(0))) {} + + CUTLASS_HOST_DEVICE + Params(typename UnderlyingIterator::Params::Base const &base) + : params_(base) {} + + }; + +private: + + // + // Data members + // + + /// Underlying pitch-linear tile iterator + UnderlyingIterator iterator_; + +public: + + /// Default constructor + PredicatedTileIterator() = default; + + /// Constructs a TileIterator from its precomputed state, threadblock offset, and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileIterator( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id, ///< ID of each participating thread + TensorCoord const &threadblock_offset, ///< Initial offset of threadblock + int const *indices = nullptr ///< Gather indices + ): + iterator_( + params.params_, + pointer, + layout::PitchLinearCoord(extent.column(), extent.row()), + thread_id, + layout::PitchLinearCoord(threadblock_offset.column(), threadblock_offset.row()), + indices + ) { } + + /// Construct a PredicatedTileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + PredicatedTileIterator( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ): PredicatedTileIterator(params, pointer, extent, thread_id, make_Coord(0, 0)) { } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances to 
the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the iterator's + /// internal pointer is reverted to the first "steady state" tile. Subsequent calls + /// are lightweight and must only update the internal pointer. + CUTLASS_HOST_DEVICE + PredicatedTileIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the iterator's + /// internal pointer is reverted to the first "steady state" tile. Subsequent calls + /// are lightweight and must only update the internal pointer. + CUTLASS_HOST_DEVICE + PredicatedTileIterator operator++(int) { + PredicatedTileIterator self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { + iterator_.clear_mask(enable); + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { + iterator_.enable_mask(); + } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const &mask) { + iterator_.set_mask(mask); + } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask &mask) { + iterator_.get_mask(mask); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_byte_offset(Fragment &frag, LongIndex byte_offset) { + iterator_.load_with_byte_offset(frag, byte_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment &frag) { + load_with_pointer_offset(frag, 0); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { + iterator_.store_with_pointer_offset(frag, pointer_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_byte_offset(Fragment const &frag, LongIndex byte_offset) { + iterator_.store_with_byte_offset(frag, byte_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const &frag) { + store_with_pointer_offset(frag, 0); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileIterator for affine rank-2 data. 
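
The affine rank-2 specialization that follows differs from the pitch-linear case mainly in the layout it addresses: a pitch-linear layout has an implicit unit stride along the contiguous dimension, while an affine rank-2 layout carries an explicit stride for both ranks. The sketch below contrasts the two offset computations under the conventional affine mapping offset = c0*s0 + c1*s1; the helper names are illustrative and not copied from the CUTLASS layout implementation.

    // Hypothetical contrast of pitch-linear vs. affine rank-2 offsets.
    #include <cassert>
    #include <cstdint>

    int64_t pitch_linear_offset(int contiguous, int strided, int64_t ld) {
      return contiguous + int64_t(strided) * ld;    // implicit unit stride
    }

    int64_t affine_rank2_offset(int c0, int c1, int64_t s0, int64_t s1) {
      return int64_t(c0) * s0 + int64_t(c1) * s1;   // both strides explicit
    }

    int main() {
      // With s0 == 1 the affine mapping degenerates to pitch-linear.
      assert(affine_rank2_offset(7, 3, 1, 128) == pitch_linear_offset(7, 3, 128));
      // A genuinely affine case where neither stride is 1.
      assert(affine_rank2_offset(7, 3, 2, 256) == 7 * 2 + 3 * 256);
      return 0;
    }
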
+/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template +class PredicatedTileIterator, AdvanceRank, + ThreadMap_, AccessSize, false> { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::AffineRankN<2>; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element *; + using NonConstPointer = typename platform::remove_const::type *; + + /// Type used for internal memory accesses + using AccessType = AlignedArray::value / 8)>; + + /// Underlying iterator to compute the addresses + using TileAccessIterator = + PredicatedTileAccessIterator; + + static int const kAccessesPerVector = TileAccessIterator::kAccessesPerVector; + + /// Fragment object to be loaded or stored + using Fragment = cutlass::Array; + + /// Predicate vector stores mask to guard accesses + using Mask = typename TileAccessIterator::Mask; + + /// Parameters object is precomputed state and is host-constructible + class Params { + public: + + friend PredicatedTileIterator; + + private: + /// Parameters object + typename TileAccessIterator::Params params_; + + public: + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const &layout) : params_(layout) {} + + /// Default constructor + Params() = default; + }; + + private: + /// Internal pointer type permits fast address arithmetic + using BytePointer = char *; + + private: + // + // Data members + // + + /// Data member to the tile access iterator + TileAccessIterator address_iterator_; + + public: + + /// Default constructor + PredicatedTileIterator() = default; + + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileIterator( + /// Precomputed parameters object + Params const ¶ms, + /// Pointer to start of tensor + Pointer pointer, + /// Extent of tensor + TensorCoord extent, + /// ID of each participating thread + int thread_id, + /// Initial offset of threadblock + TensorCoord const &threadblock_offset, + int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization + ) + : address_iterator_(params.params_, pointer, extent, thread_id, + threadblock_offset) {} + + /// Construct a PredicatedTileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + PredicatedTileIterator( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ) + : PredicatedTileIterator(params, pointer, extent, thread_id, + make_Coord(0, 0)) {} + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + address_iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances to the next tile in memory. 
+ /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileIterator &operator++() { + if (kAdvanceRank) + address_iterator_.add_tile_offset(make_Coord(0, 1)); + else + address_iterator_.add_tile_offset(make_Coord(1, 0)); + + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileIterator operator++(int) { + PredicatedTileIterator self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { address_iterator_.clear_mask(enable); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { address_iterator_.enable_mask(); } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const &mask) { address_iterator_.set_mask(mask); } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask &mask) { address_iterator_.get_mask(mask); } + + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + load_with_byte_offset(frag, pointer_offset * sizeof_bits::value / 8); + } + + CUTLASS_DEVICE + void load_with_byte_offset(Fragment &frag, LongIndex byte_offset) { + + AccessType *frag_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < kAccessesPerVector; ++v) { + + int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous); + + address_iterator_.set_iteration_index(idx); + char const *byte_ptr = reinterpret_cast(address_iterator_.get()) + byte_offset; + + AccessType const *access_ptr = reinterpret_cast(byte_ptr); + + cutlass::arch::global_load( + frag_ptr[idx], access_ptr, address_iterator_.valid()); + + ++address_iterator_; + } + } + } + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment &frag) { load_with_byte_offset(frag, 0); } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { + store_with_byte_offset(frag, pointer_offset * sizeof_bits::value / 8); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_byte_offset(Fragment const &frag, LongIndex byte_offset) { + address_iterator_.set_iteration_index(0); + AccessType const *frag_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < kAccessesPerVector; ++v) { + + int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous); + + char *byte_ptr = reinterpret_cast(address_iterator_.get()) + byte_offset; + AccessType *access_ptr = reinterpret_cast(byte_ptr); + + if (address_iterator_.valid()) { + *access_ptr = frag_ptr[idx]; + } + ++address_iterator_; + } + } 
+ } + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const &frag) { store_with_byte_offset(frag, 0); } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileIterator for affine rank 2 column-major data. +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template < + typename Shape_, + typename Element_, + int AdvanceRank, + typename ThreadMap_, + int AccessSize +> +class PredicatedTileIterator { +public: + + static_assert(AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::AffineRank2ColumnMajor; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element *; + using NonConstPointer = typename platform::remove_const::type *; + + // Map to the underlying AffineRankN<2> layout + using UnderlyingIterator = PredicatedTileIterator< + layout::PitchLinearShape, + Element, + layout::AffineRankN<2>, + (kAdvanceRank == 0 ? 0 : 1), + ThreadMap, + AccessSize + >; + + using AccessType = typename UnderlyingIterator::AccessType; + + /// Fragment object to be loaded or stored + using Fragment = cutlass::Array; + + /// Predicate vector stores mask to guard accesses + using Mask = typename UnderlyingIterator::Mask; + + /// Parameters object is precomputed state and is host-constructible + class Params { + private: + + friend PredicatedTileIterator; + + /// Parameters object + typename UnderlyingIterator::Params params_; + + public: + + /// Default constructor + Params() = default; + + /// Construct the Params object given an AffineRankN<2> tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const &layout): params_(layout::AffineRankN<2>(layout.stride(0), layout.stride(1))) + {} + }; + +private: + + // + // Data members + // + + /// Underlying AffineRankN<2> tile iterator + UnderlyingIterator iterator_; + +public: + + /// Default constructor + PredicatedTileIterator() = default; + + /// Constructs a TileIterator from its precomputed state, threadblock offset, and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileIterator( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id, ///< ID of each participating thread + TensorCoord const &threadblock_offset, ///< Initial offset of threadblock + int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization + ): + iterator_( + params.params_, + pointer, + layout::PitchLinearCoord(extent.row(), extent.column()), + thread_id, + layout::PitchLinearCoord(threadblock_offset.row(), threadblock_offset.column()) + ) { } + + /// Construct a PredicatedTileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + PredicatedTileIterator( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ): 
PredicatedTileIterator(params, pointer, extent, thread_id, make_Coord(0, 0)) { } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the iterator's + /// internal pointer is reverted to the first "steady state" tile. Subsequent calls + /// are lightweight and must only update the internal pointer. + CUTLASS_HOST_DEVICE + PredicatedTileIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the iterator's + /// internal pointer is reverted to the first "steady state" tile. Subsequent calls + /// are lightweight and must only update the internal pointer. + CUTLASS_HOST_DEVICE + PredicatedTileIterator operator++(int) { + PredicatedTileIterator self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { + iterator_.clear_mask(enable); + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { + iterator_.enable_mask(); + } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const &mask) { + iterator_.set_mask(mask); + } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask &mask) { + iterator_.get_mask(mask); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_byte_offset(Fragment &frag, LongIndex byte_offset) { + iterator_.load_with_byte_offset(frag, byte_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment &frag) { + load_with_pointer_offset(frag, 0); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { + iterator_.store_with_pointer_offset(frag, pointer_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_byte_offset(Fragment const &frag, LongIndex byte_offset) { + iterator_.store_with_byte_offset(frag, byte_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const &frag) { + store_with_pointer_offset(frag, 0); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileIterator for affine rank 2 row-major data. 
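+/// This adapter delegates to the AffineRankN<2> specialization above: the Params
+/// constructor presents the strides as (stride(1), stride(0)), logical (row, column)
+/// coordinates are passed as (column, row), and the advance rank is flipped to match
+/// (see the UnderlyingIterator alias and constructors below).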
+/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template < + typename Shape_, + typename Element_, + int AdvanceRank, + typename ThreadMap_, + int AccessSize +> +class PredicatedTileIterator { +public: + + static_assert(AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::AffineRank2RowMajor; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element *; + using NonConstPointer = typename platform::remove_const::type *; + + // Map to the underlying AffineRankN<2> layout + using UnderlyingIterator = PredicatedTileIterator< + layout::PitchLinearShape, + Element, + layout::AffineRankN<2>, + (kAdvanceRank == 0 ? 1 : 0), + ThreadMap, + AccessSize + >; + + using AccessType = typename UnderlyingIterator::AccessType; + + /// Fragment object to be loaded or stored + using Fragment = cutlass::Array; + + /// Predicate vector stores mask to guard accesses + using Mask = typename UnderlyingIterator::Mask; + + /// Parameters object is precomputed state and is host-constructible + class Params { + private: + + friend PredicatedTileIterator; + + /// Parameters object + typename UnderlyingIterator::Params params_; + + public: + + /// Default constructor + Params() = default; + + /// Construct the Params object given an AffineRankN<2> tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const &layout): params_(layout::AffineRankN<2>(layout.stride(1), layout.stride(0))) {} + }; + + +private: + + // + // Data members + // + + /// Underlying AffineRankN<2> tile iterator + UnderlyingIterator iterator_; + +public: + + /// Default constructor + PredicatedTileIterator() = default; + + /// Constructs a TileIterator from its precomputed state, threadblock offset, and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileIterator( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id, ///< ID of each participating thread + TensorCoord const &threadblock_offset, ///< Initial offset of threadblock + int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization + ): + iterator_( + params.params_, + pointer, + layout::PitchLinearCoord(extent.column(), extent.row()), + thread_id, + layout::PitchLinearCoord(threadblock_offset.column(), threadblock_offset.row()) + ) { } + + /// Construct a PredicatedTileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + PredicatedTileIterator( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ): PredicatedTileIterator(params, pointer, extent, thread_id, make_Coord(0, 0)) { } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances to the next tile in memory. 
+ /// + /// The first time this method is called, predicates are updated, and the iterator's + /// internal pointer is reverted to the first "steady state" tile. Subsequent calls + /// are lightweight and must only update the internal pointer. + CUTLASS_HOST_DEVICE + PredicatedTileIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the iterator's + /// internal pointer is reverted to the first "steady state" tile. Subsequent calls + /// are lightweight and must only update the internal pointer. + CUTLASS_HOST_DEVICE + PredicatedTileIterator operator++(int) { + PredicatedTileIterator self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { + iterator_.clear_mask(enable); + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { + iterator_.enable_mask(); + } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const &mask) { + iterator_.set_mask(mask); + } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask &mask) { + iterator_.get_mask(mask); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_byte_offset(Fragment &frag, LongIndex byte_offset) { + iterator_.load_with_byte_offset(frag, byte_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment &frag) { + load_with_pointer_offset(frag, 0); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { + iterator_.store_with_pointer_offset(frag, pointer_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_byte_offset(Fragment const &frag, LongIndex byte_offset) { + iterator_.store_with_byte_offset(frag, byte_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const &frag) { + store_with_pointer_offset(frag, 0); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileIterator for interleaved data. It is mapped +/// to the congruous layout. 
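+/// Logical matrix coordinates are remapped when delegating to the underlying
+/// pitch-linear iterator: an extent of (rows, columns) is presented as
+/// (rows * InterleavedK, columns / InterleavedK), and the threadblock offset is
+/// remapped the same way (see the constructor below). For example, with
+/// InterleavedK = 32, a logical extent of (128, 64) becomes a pitch-linear
+/// extent of (4096, 2).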
+/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// + +template +class PredicatedTileIterator, + AdvanceRank, ThreadMap_, AccessSize, false> { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + static int const kInterleavedK = InterleavedK; + using Layout = layout::ColumnMajorInterleaved; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element *; + using NonConstPointer = typename platform::remove_const::type *; + + using UnderlyingIterator = PredicatedTileIterator< + layout::PitchLinearShape, + Element, layout::PitchLinear, (kAdvanceRank == 0 ? 0 : 1), ThreadMap, AccessSize>; + + + using AccessType = typename UnderlyingIterator::AccessType; + + /// Fragment object to be loaded or stored + using Fragment = cutlass::Array; + + /// Predicate vector stores mask to guard accesses + using Mask = typename UnderlyingIterator::Mask; + + /// Parameters object is precomputed state and is host-constructible + class Params { + private: + friend PredicatedTileIterator; + + /// Parameters object + typename UnderlyingIterator::Params params_; + + public: + + /// Default constructor + Params() = default; + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const &layout) + : params_(layout::PitchLinear(layout.stride(0))) {} + + CUTLASS_HOST_DEVICE + Params(typename UnderlyingIterator::Params::Base const &base) + : params_(base) {} + + }; + + private: + // + // Data members + // + + /// Underlying pitch-linear tile iterator + UnderlyingIterator iterator_; + + public: + + /// Default constructor + PredicatedTileIterator() = default; + + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileIterator( + /// Precomputed parameters object + Params const ¶ms, + /// Pointer to start of tensor + Pointer pointer, + /// Extent of tensor + TensorCoord extent, + /// ID of each participating thread + int thread_id, + /// Initial offset of threadblock + TensorCoord const &threadblock_offset, + int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization + ) + : iterator_(params.params_, pointer, + layout::PitchLinearCoord(extent.row() * kInterleavedK, + extent.column() / kInterleavedK), + thread_id, + layout::PitchLinearCoord( + threadblock_offset.row() * kInterleavedK, + threadblock_offset.column() / kInterleavedK)) {} + + /// Construct a PredicatedTileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + PredicatedTileIterator( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ) + : PredicatedTileIterator(params, pointer, extent, thread_id, + make_Coord(0, 0)) {} + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + 
iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileIterator operator++(int) { + PredicatedTileIterator self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { iterator_.enable_mask(); } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const &mask) { iterator_.set_mask(mask); } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask &mask) { iterator_.get_mask(mask); } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment &frag) { load_with_pointer_offset(frag, 0); } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { + iterator_.store_with_pointer_offset(frag, pointer_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileIterator for interleaved-32 data. It is +/// mapped to the congruous layout. +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template +class PredicatedTileIterator, + AdvanceRank, ThreadMap_, AccessSize, false> { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + static int const kInterleavedK = InterleavedK; + using Layout = layout::RowMajorInterleaved; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element *; + using NonConstPointer = typename platform::remove_const::type *; + + using UnderlyingIterator = PredicatedTileIterator< + layout::PitchLinearShape, + Element, layout::PitchLinear, (kAdvanceRank == 0 ? 
1 : 0), ThreadMap, AccessSize>; + + + using AccessType = typename UnderlyingIterator::AccessType; + + /// Fragment object to be loaded or stored + using Fragment = cutlass::Array; + + /// Predicate vector stores mask to guard accesses + using Mask = typename UnderlyingIterator::Mask; + + /// Parameters object is precomputed state and is host-constructible + class Params { + private: + friend PredicatedTileIterator; + + /// Parameters object + typename UnderlyingIterator::Params params_; + + public: + + /// Default constructor + Params() = default; + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const &layout) + : params_(layout::PitchLinear(layout.stride(0))) {} + + CUTLASS_HOST_DEVICE + Params(typename UnderlyingIterator::Params::Base const &base) + : params_(base) {} + }; + + private: + // + // Data members + // + + /// Underlying pitch-linear tile iterator + UnderlyingIterator iterator_; + + public: + + /// Default constructor + PredicatedTileIterator() = default; + + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileIterator( + /// Precomputed parameters object + Params const ¶ms, + /// Pointer to start of tensor + Pointer pointer, + /// Extent of tensor + TensorCoord extent, + /// ID of each participating thread + int thread_id, + /// Initial offset of threadblock + TensorCoord const &threadblock_offset, + int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization + ) + : iterator_(params.params_, pointer, + layout::PitchLinearCoord(extent.column() * kInterleavedK, + extent.row() / kInterleavedK), + thread_id, + layout::PitchLinearCoord( + threadblock_offset.column() * kInterleavedK, + threadblock_offset.row() / kInterleavedK)) {} + + /// Construct a PredicatedTileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + PredicatedTileIterator( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ) + : PredicatedTileIterator(params, pointer, extent, thread_id, + make_Coord(0, 0)) {} + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. 
+ CUTLASS_HOST_DEVICE + PredicatedTileIterator operator++(int) { + PredicatedTileIterator self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { iterator_.enable_mask(); } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const &mask) { iterator_.set_mask(mask); } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask &mask) { iterator_.get_mask(mask); } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment &frag) { load_with_pointer_offset(frag, 0); } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { + iterator_.store_with_pointer_offset(frag, pointer_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace transform +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/predicated_tile_iterator_2dthreadtile.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/predicated_tile_iterator_2dthreadtile.h new file mode 100644 index 0000000000000000000000000000000000000000..0a685fc6329695b1426bf2fd201038e546eea768 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/predicated_tile_iterator_2dthreadtile.h @@ -0,0 +1,787 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Templates implementing loading of tiles from pitch-linear rank=2 tensors. + + This iterator uses masks to guard out-of-bounds accesses and visits the last "residue" tile + first, with the objective of minimizing predicate mask updates during steady-state operation. + + A precomputed "Params" object minimizes the amount of state that must be stored in registers, + and integer addition is used to advance the pointer through memory. +*/ + +#pragma once + +#include "cutlass/transform/threadblock/predicated_tile_access_iterator_2dthreadtile.h" +#include "cutlass/transform/thread/transpose.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace transform { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +/// PredicatedTileIterator2dThreadTile +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +/// Regular tile iterator using a precomputed control structure to minimize register liveness +/// and integer arithmetic. +/// +/// Layout is assumed to be invariant at the time the precomputed "Params" object is constructed. +/// +/// Base pointer and tensor extents may be specified at the time the iterator is constructed. +/// Subsequently, they are assumed to be immutable. +/// +/// Adding a logical coordinate offset may be performed at the time the iterator is constructed. +/// Subsequent additions to logical coordinate offset may be performed but are relatively expensive. +/// +/// Vistitation order is intended to first visit a "residual" tile that may be partially full in +/// both the advance dimension and the steady-state dimension. This is assumed to be the last +/// tile in the iteration sequence. Advancing an iterator that has just been constructed moves to +/// the first tile that is full in the advance dimension and recomputes predicates. Subsequent +/// accesses may be performed without updating internal predicates and are efficient in terms of +/// live register state and pointer arithmetic instructions. +/// +/// To be efficient, this assumes the iteraor will be dereferenced and advanced at least once +/// outside any looping structure to minimize integer arithmetic. +/// +/// Acceses out of bounds are safe so long as `clear_mask()` is called prior to dereferencing +/// the iterator. 
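+///
+/// When the Transpose template argument is true, each loaded fragment is additionally
+/// transposed in 4x4 element blocks via thread::Transpose (applied at the end of
+/// load_with_pointer_offset in the pitch-linear specialization below).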
+/// +/// +/// Example: +/// +/// An efficient pipeline structure may be constructed as follows: +/// +// template +// __global__ void kernel( +// typename Iterator::Params params, +// typename Iterator::Element *ptr, +// TensorCoord extent) { +// +// typename Iterator::Fragment fragment; +// +// TensorCoord threadblock_offset(0, 0); +// +// Iterator iter(params, ptr, extent, threadIdx.x, threadblock_offsets); +// +// +// fragment = *iter; // load "residue" tile first +// ++iter; // advance to first "steady state" tile and update internal masks +// +// +// #pragma unroll +// for (int i = Remaining - 1; i >= 0; --i) { +// +// f(fragment); +// +// if (!i) { +// iter.clear_mask(); // light-weight operation to clear masks - subsequent loads become NO-OPs. +// } +// +// fragment = *iter; // load tile during "steady state" phase +// ++iter; // advance to next tile - lightweight due to steady-state masks +// } +// } +// +// void host(TensorView view) { +// +// using Iterator = transform::threadblock::PredicatedTileIterator2dThreadTile; +// +// typename Iterator::Params params(view.layout()); +// +// kernel(params, view.data()); +// } +/// +/// +template < + typename Shape, + typename Element, + typename Layout, + int AdvanceRank, + typename ThreadMap, + bool Transpose = false +> +class PredicatedTileIterator2dThreadTile; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileIterator2dThreadTile for pitch-linear data. +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template +class PredicatedTileIterator2dThreadTile { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::PitchLinear; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element *; + using NonConstPointer = typename platform::remove_const::type *; + + /// Type used for internal memory accesses + /// extra set of parenthesis is needed for VS compiler + struct alignas((ThreadMap::kElementsPerAccess * sizeof_bits::value / + 8)) AccessType { + + Array storage; + + static int const kElements = ThreadMap::kElementsPerAccess; + }; + + /// Optinally this fragment can be 4x4 transposed + using Transform = thread::Transpose< ThreadMap::Iterations::kCount * ThreadMap::ThreadAccessShape::kCount , layout::PitchLinearShape<4,4>, Element>; + static bool const transpose = Transpose_; + + /// Underlying iterator to compute the addresses + using TileAccessIterator = + PredicatedTileAccessIterator2dThreadTile; + + /// Fragment object to be loaded or stored + using Fragment = cutlass::Array; + + /// Predicate vector stores mask to guard accesses + using Mask = typename TileAccessIterator::Mask; + + /// Parameters object is precomputed state and is host-constructible + class Params { + public: + using Base = typename TileAccessIterator::Params::Base; + + friend PredicatedTileIterator2dThreadTile; + + private: + /// Parameters object + typename TileAccessIterator::Params params_; + + public: + 
/// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const &layout) : params_(layout) { } + + CUTLASS_HOST_DEVICE + Params() { } + + CUTLASS_HOST_DEVICE + Params(Base const &base) + : params_(base) {} + }; + + private: + /// Internal pointer type permits fast address arithmetic + using BytePointer = char *; + + private: + // + // Data members + // + + /// Data member to the tile access iterator + TileAccessIterator address_iterator_; + + public: + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileIterator2dThreadTile( + /// Precomputed parameters object + Params const ¶ms, + /// Pointer to start of tensor + Pointer pointer, + /// Extent of tensor + TensorCoord extent, + /// ID of each participating thread + int thread_id, + /// Initial offset of threadblock + TensorCoord const &threadblock_offset, + int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization + ) + : address_iterator_(params.params_, pointer, extent, thread_id, + threadblock_offset) {} + + /// Construct a PredicatedTileIterator2dThreadTile with zero threadblock offset + CUTLASS_HOST_DEVICE + PredicatedTileIterator2dThreadTile( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ) + : PredicatedTileIterator2dThreadTile(params, pointer, extent, thread_id, + make_Coord(0, 0)) {} + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + address_iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileIterator2dThreadTile &operator++() { + if (kAdvanceRank) + address_iterator_.add_tile_offset({0, 1}); + else + address_iterator_.add_tile_offset({1, 0}); + + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. 
+ CUTLASS_HOST_DEVICE + PredicatedTileIterator2dThreadTile operator++(int) { + PredicatedTileIterator2dThreadTile self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { address_iterator_.clear_mask(enable); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { address_iterator_.enable_mask(); } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const &mask) { address_iterator_.set_mask(mask); } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask &mask) { address_iterator_.get_mask(mask); } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + + AccessType *frag_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { + CUTLASS_PRAGMA_UNROLL + for (int ts = 0; ts < ThreadMap::ThreadAccessShape::kStrided; ts++){ + + int access_idx = ts + c * ThreadMap::ThreadAccessShape::kStrided + \ + s * ThreadMap::Iterations::kContiguous * ThreadMap::ThreadAccessShape::kStrided; + + address_iterator_.set_iteration_index(access_idx); + if (address_iterator_.valid()) { + + frag_ptr[access_idx] = + *(address_iterator_.get() + pointer_offset); + } + + ++address_iterator_; + } + } + } + + if (transpose) { + Transform t; + t.transform(frag, frag); + } + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment &frag) { load_with_pointer_offset(frag, 0); } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { + + AccessType const *frag_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { + CUTLASS_PRAGMA_UNROLL + for (int ts = 0; ts < ThreadMap::ThreadAccessShape::kStrided; ts++){ + + int access_idx = ts + c * ThreadMap::ThreadAccessShape::kStrided + \ + s * ThreadMap::Iterations::kContiguous * ThreadMap::ThreadAccessShape::kStrided; + + address_iterator_.set_iteration_index(access_idx); + if (address_iterator_.valid()) { + *(address_iterator_.get() + pointer_offset) = frag_ptr[access_idx]; + } + ++address_iterator_; + } + } + } + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileIterator2dThreadTile for pitch-linear data. 
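+/// This specialization adapts column-major data by delegating to the pitch-linear
+/// 2d-thread-tile iterator, mapping rows to the contiguous dimension and columns to
+/// the strided dimension (see the UnderlyingIterator alias and constructors below).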
+/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template < + typename Shape_, + typename Element_, + int AdvanceRank, + typename ThreadMap_, + bool Transpose_ +> +class PredicatedTileIterator2dThreadTile { +public: + + static_assert(AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::ColumnMajor; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + static bool const Transpose = Transpose_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element *; + using NonConstPointer = typename platform::remove_const::type *; + + using UnderlyingIterator = PredicatedTileIterator2dThreadTile< + layout::PitchLinearShape, + Element, + layout::PitchLinear, + (kAdvanceRank == 0 ? 0 : 1), + ThreadMap, + Transpose + >; + + using AccessType = typename UnderlyingIterator::AccessType; + + /// Fragment object to be loaded or stored + using Fragment = cutlass::Array; + + /// Predicate vector stores mask to guard accesses + using Mask = typename UnderlyingIterator::Mask; + + /// Parameters object is precomputed state and is host-constructible + class Params { + private: + + friend PredicatedTileIterator2dThreadTile; + + /// Parameters object + typename UnderlyingIterator::Params params_; + + public: + + CUTLASS_HOST_DEVICE + Params() { } + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const &layout): params_(layout::PitchLinear(layout.stride(0))) {} + + CUTLASS_HOST_DEVICE + Params(typename UnderlyingIterator::Params::Base const &base) + : params_(base) {} + }; + + +private: + + // + // Data members + // + + /// Underlying pitch-linear tile iterator + UnderlyingIterator iterator_; + +public: + + /// Constructs a TileIterator from its precomputed state, threadblock offset, and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileIterator2dThreadTile( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id, ///< ID of each participating thread + TensorCoord const &threadblock_offset, ///< Initial offset of threadblock + int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization + ): + iterator_( + params.params_, + pointer, + layout::PitchLinearCoord(extent.row(), extent.column()), + thread_id, + layout::PitchLinearCoord(threadblock_offset.row(), threadblock_offset.column()) + ) { } + + /// Construct a PredicatedTileIterator2dThreadTile with zero threadblock offset + CUTLASS_HOST_DEVICE + PredicatedTileIterator2dThreadTile( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ): PredicatedTileIterator2dThreadTile(params, pointer, extent, thread_id, make_Coord(0, 0)) { } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + 
iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the iterator's + /// internal pointer is reverted to the first "steady state" tile. Subsequent calls + /// are lightweight and must only update the internal pointer. + CUTLASS_HOST_DEVICE + PredicatedTileIterator2dThreadTile &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the iterator's + /// internal pointer is reverted to the first "steady state" tile. Subsequent calls + /// are lightweight and must only update the internal pointer. + CUTLASS_HOST_DEVICE + PredicatedTileIterator2dThreadTile operator++(int) { + PredicatedTileIterator2dThreadTile self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { + iterator_.clear_mask(enable); + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { + iterator_.enable_mask(); + } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const &mask) { + iterator_.set_mask(mask); + } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask &mask) { + iterator_.get_mask(mask); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment &frag) { + load_with_pointer_offset(frag, 0); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { + iterator_.store_with_pointer_offset(frag, pointer_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const &frag) { + store_with_pointer_offset(frag, 0); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileIterator2dThreadTile for pitch-linear data. +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template < + typename Shape_, + typename Element_, + int AdvanceRank, + typename ThreadMap_, + bool Transpose_ +> +class PredicatedTileIterator2dThreadTile { +public: + + static_assert(AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::RowMajor; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + static bool const Transpose = Transpose_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element *; + using NonConstPointer = typename platform::remove_const::type *; + + using UnderlyingIterator = PredicatedTileIterator2dThreadTile< + layout::PitchLinearShape, + Element, + layout::PitchLinear, + (kAdvanceRank == 0 ? 
1 : 0), + ThreadMap, + Transpose + >; + + using AccessType = typename UnderlyingIterator::AccessType; + + /// Fragment object to be loaded or stored + using Fragment = cutlass::Array; + + /// Predicate vector stores mask to guard accesses + using Mask = typename UnderlyingIterator::Mask; + + /// Parameters object is precomputed state and is host-constructible + class Params { + private: + + friend PredicatedTileIterator2dThreadTile; + + /// Parameters object + typename UnderlyingIterator::Params params_; + + public: + + CUTLASS_HOST_DEVICE + Params() { } + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const &layout): params_(layout::PitchLinear(layout.stride(0))) { } + + CUTLASS_HOST_DEVICE + Params(typename UnderlyingIterator::Params::Base const &base) + : params_(base) {} + }; + + +private: + + // + // Data members + // + + /// Underlying pitch-linear tile iterator + UnderlyingIterator iterator_; + +public: + + /// Constructs a TileIterator from its precomputed state, threadblock offset, and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileIterator2dThreadTile( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id, ///< ID of each participating thread + TensorCoord const &threadblock_offset, ///< Initial offset of threadblock + int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization + ): + iterator_( + params.params_, + pointer, + layout::PitchLinearCoord(extent.column(), extent.row()), + thread_id, + layout::PitchLinearCoord(threadblock_offset.column(), threadblock_offset.row()) + ) { } + + /// Construct a PredicatedTileIterator2dThreadTile with zero threadblock offset + CUTLASS_HOST_DEVICE + PredicatedTileIterator2dThreadTile( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ): PredicatedTileIterator2dThreadTile(params, pointer, extent, thread_id, make_Coord(0, 0)) { } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the iterator's + /// internal pointer is reverted to the first "steady state" tile. Subsequent calls + /// are lightweight and must only update the internal pointer. + CUTLASS_HOST_DEVICE + PredicatedTileIterator2dThreadTile &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the iterator's + /// internal pointer is reverted to the first "steady state" tile. Subsequent calls + /// are lightweight and must only update the internal pointer. 
+ CUTLASS_HOST_DEVICE + PredicatedTileIterator2dThreadTile operator++(int) { + PredicatedTileIterator2dThreadTile self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { + iterator_.clear_mask(enable); + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { + iterator_.enable_mask(); + } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const &mask) { + iterator_.set_mask(mask); + } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask &mask) { + iterator_.get_mask(mask); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment &frag) { + load_with_pointer_offset(frag, 0); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { + iterator_.store_with_pointer_offset(frag, pointer_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const &frag) { + store_with_pointer_offset(frag, 0); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace transform +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/predicated_tile_iterator_triangular_matrix.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/predicated_tile_iterator_triangular_matrix.h new file mode 100644 index 0000000000000000000000000000000000000000..b849ee7a65c5a65c24acfffb204f5d8797f0ff79 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/predicated_tile_iterator_triangular_matrix.h @@ -0,0 +1,818 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Templates implementing loading of tiles from pitch-linear rank=2 tensors. + + This iterator uses masks to guard out-of-bounds accesses and visits the last "residue" tile + first, with the objective of minimizing predicate mask updates during steady-state operation. + + A precomputed "Params" object minimizes the amount of state that must be stored in registers, + and integer addition is used to advance the pointer through memory. +*/ + +#pragma once + +#include "cutlass/arch/memory.h" +#include "cutlass/transform/threadblock/predicated_tile_access_iterator_triangular_matrix.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace transform { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +/// PredicatedTileIteratorTriangularMatrix +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +/// Regular tile iterator using a precomputed control structure to minimize register liveness +/// and integer arithmetic. +/// +/// Layout is assumed to be invariant at the time the precomputed "Params" object is constructed. +/// +/// Base pointer and tensor extents may be specified at the time the iterator is constructed. +/// Subsequently, they are assumed to be immutable. +/// +/// Adding a logical coordinate offset may be performed at the time the iterator is constructed. +/// Subsequent additions to logical coordinate offset may be performed but are relatively expensive. +/// +/// Vistitation order is intended to first visit a "residual" tile that may be partially full in +/// both the advance dimension and the steady-state dimension. This is assumed to be the last +/// tile in the iteration sequence. Advancing an iterator that has just been constructed moves to +/// the first tile that is full in the advance dimension and recomputes predicates. Subsequent +/// accesses may be performed without updating internal predicates and are efficient in terms of +/// live register state and pointer arithmetic instructions. +/// +/// To be efficient, this assumes the iteraor will be dereferenced and advanced at least once +/// outside any looping structure to minimize integer arithmetic. +/// +/// Acceses out of bounds are safe so long as `clear_mask()` is called prior to dereferencing +/// the iterator. 
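+///
+/// In addition to the usual tile iterator parameters, this iterator is parameterized
+/// by SideMode, FillMode, and DiagType. These are forwarded to the underlying
+/// PredicatedTileAccessIteratorTriangularMatrix so that its predicates can take the
+/// triangular structure of the operand into account.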
+/// +/// +/// Example: +/// +/// An efficient pipeline structure may be constructed as follows: +/// +// template +// __global__ void kernel( +// typename Iterator::Params params, +// typename Iterator::Element *ptr, +// TensorCoord extent) { +// +// typename Iterator::Fragment fragment; +// +// TensorCoord threadblock_offset(0, 0); +// +// Iterator iter(params, ptr, extent, threadIdx.x, threadblock_offsets); +// +// +// fragment = *iter; // load "residue" tile first +// ++iter; // advance to first "steady state" tile and update internal masks +// +// +// #pragma unroll +// for (int i = Remaining - 1; i >= 0; --i) { +// +// f(fragment); +// +// if (!i) { +// iter.clear_mask(); // light-weight operation to clear masks - subsequent loads become NO-OPs. +// } +// +// fragment = *iter; // load tile during "steady state" phase +// ++iter; // advance to next tile - lightweight due to steady-state masks +// } +// } +// +// void host(TensorView view) { +// +// using Iterator = transform::threadblock::PredicatedTileIteratorTriangularMatrix; +// +// typename Iterator::Params params(view.layout()); +// +// kernel(params, view.data()); +// } +/// +/// +template < + typename Shape, + typename Element, + typename Layout, + int AdvanceRank, + typename ThreadMap, + SideMode kSideMode, + FillMode kFillMode, + DiagType kDiagType, + int AccessSize = ThreadMap::kElementsPerAccess +> +class PredicatedTileIteratorTriangularMatrix; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileIteratorTriangularMatrix for pitch-linear data. +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template +class PredicatedTileIteratorTriangularMatrix { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::PitchLinear; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element *; + using NonConstPointer = typename platform::remove_const::type *; + + /// Type used for internal memory accesses + using AccessType = AlignedArray::value / 8)>; + + /// Underlying iterator to compute the addresses + using TileAccessIterator = + PredicatedTileAccessIteratorTriangularMatrix; + + static int const kAccessesPerVector = TileAccessIterator::kAccessesPerVector; + + /// Fragment object to be loaded or stored + using Fragment = cutlass::Array; + + /// Predicate vector stores mask to guard accesses + using Mask = typename TileAccessIterator::Mask; + + /// Parameters object is precomputed state and is host-constructible + class Params { + public: + friend PredicatedTileIteratorTriangularMatrix; + + private: + /// Parameters object + typename TileAccessIterator::Params params_; + + public: + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const &layout) : params_(layout) { } + + CUTLASS_HOST_DEVICE + Params() { } + }; + + private: + /// Internal pointer type permits fast address arithmetic + using BytePointer = char *; + + 
private: + // + // Data members + // + + /// Data member to the tile access iterator + TileAccessIterator address_iterator_; + + public: + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileIteratorTriangularMatrix( + /// Precomputed parameters object + Params const ¶ms, + /// Pointer to start of tensor + Pointer pointer, + /// Extent of tensor + TensorCoord extent, + /// ID of each participating thread + int thread_id, + /// Initial offset of threadblock + TensorCoord const &threadblock_offset) + : address_iterator_(params.params_, pointer, extent, thread_id, + threadblock_offset) {} + + /// Construct a PredicatedTileIteratorTriangularMatrix with zero threadblock offset + CUTLASS_HOST_DEVICE + PredicatedTileIteratorTriangularMatrix( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ) + : PredicatedTileIteratorTriangularMatrix(params, pointer, extent, thread_id, + make_Coord(0, 0)) {} + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + address_iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileIteratorTriangularMatrix &operator++() { + if (kAdvanceRank) + address_iterator_.add_tile_offset({0, 1}); + else + address_iterator_.add_tile_offset({1, 0}); + + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. 
+ CUTLASS_HOST_DEVICE + PredicatedTileIteratorTriangularMatrix operator++(int) { + PredicatedTileIteratorTriangularMatrix self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { address_iterator_.clear_mask(enable); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { address_iterator_.enable_mask(); } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const &mask) { address_iterator_.set_mask(mask); } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask &mask) { address_iterator_.get_mask(mask); } + + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + load_with_byte_offset(frag, pointer_offset * sizeof_bits::value / 8); + } + + CUTLASS_DEVICE + void load_with_byte_offset(Fragment &frag, LongIndex byte_offset) { + + AccessType *frag_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < kAccessesPerVector; ++v) { + + int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous); + + address_iterator_.set_iteration_index(idx); + char const *byte_ptr = reinterpret_cast(address_iterator_.get()) + byte_offset; + + AccessType const *access_ptr = reinterpret_cast(byte_ptr); + + cutlass::arch::global_load( + frag_ptr[idx], access_ptr, address_iterator_.valid()); + + ++address_iterator_; + } + } + } + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment &frag) { load_with_byte_offset(frag, 0); } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { + store_with_byte_offset(frag, pointer_offset * sizeof_bits::value / 8); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_byte_offset(Fragment const &frag, LongIndex byte_offset) { + address_iterator_.set_iteration_index(0); + AccessType const *frag_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < kAccessesPerVector; ++v) { + + int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous); + + char *byte_ptr = reinterpret_cast(address_iterator_.get()) + byte_offset; + AccessType *access_ptr = reinterpret_cast(byte_ptr); + + if (address_iterator_.valid()) { + *access_ptr = frag_ptr[idx]; + } + ++address_iterator_; + } + } + } + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const &frag) { store_with_byte_offset(frag, 0); } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileIteratorTriangularMatrix for column-major data. 
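// Illustrative sketch (not part of the CUTLASS sources): load_with_byte_offset() and
// store_with_byte_offset() in the pitch-linear specialization above flatten the
// (strided, contiguous, vector) loop nest into one fragment slot index,
//   idx = v + kAccessesPerVector * (c + s * Iterations::kContiguous),
// so the per-vector index varies fastest. The small program below only enumerates that
// ordering; the iteration counts are made-up stand-ins for a real ThreadMap.

#include <cstdio>

int main() {
  const int kStrided = 2;            // ThreadMap::Iterations::kStrided (illustrative)
  const int kContiguous = 3;         // ThreadMap::Iterations::kContiguous (illustrative)
  const int kAccessesPerVector = 2;  // accesses covering one ThreadMap access (illustrative)

  // Same flattening as in the load/store loops above.
  for (int s = 0; s < kStrided; ++s)
    for (int c = 0; c < kContiguous; ++c)
      for (int v = 0; v < kAccessesPerVector; ++v) {
        int idx = v + kAccessesPerVector * (c + s * kContiguous);
        std::printf("s=%d c=%d v=%d -> fragment slot %d\n", s, c, v, idx);
      }
  return 0;
}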
+/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template < + typename Shape_, + typename Element_, + int AdvanceRank, + typename ThreadMap_, + SideMode kSideMode, + FillMode kFillMode, + DiagType kDiagType, + int AccessSize +> +class PredicatedTileIteratorTriangularMatrix { +public: + + static_assert(AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::ColumnMajor; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element *; + using NonConstPointer = typename platform::remove_const::type *; + + using UnderlyingIterator = PredicatedTileIteratorTriangularMatrix< + layout::PitchLinearShape, + Element, + layout::PitchLinear, + (kAdvanceRank == 0 ? 0 : 1), + ThreadMap, + kSideMode, + kFillMode, + kDiagType, + AccessSize + >; + + using AccessType = typename UnderlyingIterator::AccessType; + + /// Fragment object to be loaded or stored + using Fragment = cutlass::Array; + + /// Predicate vector stores mask to guard accesses + using Mask = typename UnderlyingIterator::Mask; + + /// Parameters object is precomputed state and is host-constructible + class Params { + private: + + friend PredicatedTileIteratorTriangularMatrix; + + /// Parameters object + typename UnderlyingIterator::Params params_; + + public: + + CUTLASS_HOST_DEVICE + Params() { } + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const &layout): params_(layout::PitchLinear(layout.stride(0))) { + + } + }; + + +private: + + // + // Data members + // + + /// Underlying pitch-linear tile iterator + UnderlyingIterator iterator_; + +public: + + /// Constructs a TileIterator from its precomputed state, threadblock offset, and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileIteratorTriangularMatrix( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id, ///< ID of each participating thread + TensorCoord const &threadblock_offset ///< Initial offset of threadblock + ): + iterator_( + params.params_, + pointer, + layout::PitchLinearCoord(extent.row(), extent.column()), + thread_id, + layout::PitchLinearCoord(threadblock_offset.row(), threadblock_offset.column()) + ) { } + + /// Construct a PredicatedTileIteratorTriangularMatrix with zero threadblock offset + CUTLASS_HOST_DEVICE + PredicatedTileIteratorTriangularMatrix( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ): PredicatedTileIteratorTriangularMatrix(params, pointer, extent, thread_id, make_Coord(0, 0)) { } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances to the next tile in memory. 
+ /// + /// The first time this method is called, predicates are updated, and the iterator's + /// internal pointer is reverted to the first "steady state" tile. Subsequent calls + /// are lightweight and must only update the internal pointer. + CUTLASS_HOST_DEVICE + PredicatedTileIteratorTriangularMatrix &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the iterator's + /// internal pointer is reverted to the first "steady state" tile. Subsequent calls + /// are lightweight and must only update the internal pointer. + CUTLASS_HOST_DEVICE + PredicatedTileIteratorTriangularMatrix operator++(int) { + PredicatedTileIteratorTriangularMatrix self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { + iterator_.clear_mask(enable); + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { + iterator_.enable_mask(); + } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const &mask) { + iterator_.set_mask(mask); + } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask &mask) { + iterator_.get_mask(mask); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_byte_offset(Fragment &frag, LongIndex byte_offset) { + iterator_.load_with_byte_offset(frag, byte_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment &frag) { + load_with_pointer_offset(frag, 0); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { + iterator_.store_with_pointer_offset(frag, pointer_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_byte_offset(Fragment const &frag, LongIndex byte_offset) { + iterator_.store_with_byte_offset(frag, byte_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const &frag) { + store_with_pointer_offset(frag, 0); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileIteratorTriangularMatrix for row-major data. 
+/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template < + typename Shape_, + typename Element_, + int AdvanceRank, + typename ThreadMap_, + SideMode kSideMode, + FillMode kFillMode, + DiagType kDiagType, + int AccessSize +> +class PredicatedTileIteratorTriangularMatrix { +public: + + static_assert(AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::RowMajor; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element *; + using NonConstPointer = typename platform::remove_const::type *; + + using UnderlyingIterator = PredicatedTileIteratorTriangularMatrix< + layout::PitchLinearShape, + Element, + layout::PitchLinear, + (kAdvanceRank == 0 ? 1 : 0), + ThreadMap, + kSideMode, + kFillMode, + kDiagType, + AccessSize + >; + + using AccessType = typename UnderlyingIterator::AccessType; + + /// Fragment object to be loaded or stored + using Fragment = cutlass::Array; + + /// Predicate vector stores mask to guard accesses + using Mask = typename UnderlyingIterator::Mask; + + /// Parameters object is precomputed state and is host-constructible + class Params { + private: + + friend PredicatedTileIteratorTriangularMatrix; + + /// Parameters object + typename UnderlyingIterator::Params params_; + + public: + + CUTLASS_HOST_DEVICE + Params() { } + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const &layout): params_(layout::PitchLinear(layout.stride(0))) { + + }; + }; + + +private: + + // + // Data members + // + + /// Underlying pitch-linear tile iterator + UnderlyingIterator iterator_; + +public: + + /// Constructs a TileIterator from its precomputed state, threadblock offset, and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileIteratorTriangularMatrix( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id, ///< ID of each participating thread + TensorCoord const &threadblock_offset ///< Initial offset of threadblock + ): + iterator_( + params.params_, + pointer, + layout::PitchLinearCoord(extent.column(), extent.row()), + thread_id, + layout::PitchLinearCoord(threadblock_offset.column(), threadblock_offset.row()) + ) { } + + /// Construct a PredicatedTileIteratorTriangularMatrix with zero threadblock offset + CUTLASS_HOST_DEVICE + PredicatedTileIteratorTriangularMatrix( + Params const ¶ms, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ): PredicatedTileIteratorTriangularMatrix(params, pointer, extent, thread_id, make_Coord(0, 0)) { } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances to the next tile in memory. 
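// Illustrative sketch (not part of the CUTLASS sources): the ColumnMajor and RowMajor
// specializations in this header do no addressing work of their own. They re-express
// matrix coordinates as pitch-linear coordinates (column-major keeps rows contiguous,
// row-major keeps columns contiguous) and build the underlying Params from
// layout.stride(0), the matrix leading dimension. The structs below are simplified
// stand-ins for the CUTLASS coordinate types, used only to show the swap.

#include <cstdio>

struct MatrixCoord      { int row, column; };
struct PitchLinearCoord { int contiguous, strided; };

// Column-major: rows are the contiguous dimension (as in the ColumnMajor constructor).
PitchLinearCoord from_column_major(MatrixCoord c) { return {c.row, c.column}; }

// Row-major: columns are the contiguous dimension (as in the RowMajor constructor).
PitchLinearCoord from_row_major(MatrixCoord c)    { return {c.column, c.row}; }

int main() {
  MatrixCoord extent{128, 64};  // 128 rows x 64 columns

  PitchLinearCoord cm = from_column_major(extent);
  PitchLinearCoord rm = from_row_major(extent);

  std::printf("column-major: contiguous=%d strided=%d\n", cm.contiguous, cm.strided);
  std::printf("row-major:    contiguous=%d strided=%d\n", rm.contiguous, rm.strided);
  return 0;
}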
+ /// + /// The first time this method is called, predicates are updated, and the iterator's + /// internal pointer is reverted to the first "steady state" tile. Subsequent calls + /// are lightweight and must only update the internal pointer. + CUTLASS_HOST_DEVICE + PredicatedTileIteratorTriangularMatrix &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the iterator's + /// internal pointer is reverted to the first "steady state" tile. Subsequent calls + /// are lightweight and must only update the internal pointer. + CUTLASS_HOST_DEVICE + PredicatedTileIteratorTriangularMatrix operator++(int) { + PredicatedTileIteratorTriangularMatrix self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { + iterator_.clear_mask(enable); + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { + iterator_.enable_mask(); + } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const &mask) { + iterator_.set_mask(mask); + } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask &mask) { + iterator_.get_mask(mask); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_byte_offset(Fragment &frag, LongIndex byte_offset) { + iterator_.load_with_byte_offset(frag, byte_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment &frag) { + load_with_pointer_offset(frag, 0); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { + iterator_.store_with_pointer_offset(frag, pointer_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_byte_offset(Fragment const &frag, LongIndex byte_offset) { + iterator_.store_with_byte_offset(frag, byte_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const &frag) { + store_with_pointer_offset(frag, 0); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace transform +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/predicated_vector_access_iterator.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/predicated_vector_access_iterator.h new file mode 100644 index 0000000000000000000000000000000000000000..4762175a5b651ceb5a733d6039ce277dc082dd2d --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/predicated_vector_access_iterator.h @@ -0,0 +1,417 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief Templates implementing computing the addresses of loading small + vectors from the global memory. +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/array.h" +#include "cutlass/coord.h" +#include "cutlass/layout/pitch_linear.h" +#include "cutlass/layout/matrix.h" +#include "cutlass/layout/tensor.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/matrix_shape.h" +#include "cutlass/tensor_ref.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace transform { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +/// PredicatedVectorAccessIterator +/// +template < + /// Shape of the vector accessed by the entire threadblock + typename Shape, + /// Shape of the vector accessed by the warp + typename WarpShape, + /// Type of Element + typename Element, + /// Layout of the vector + typename Layout, + /// Number of elements for each access + int ElementsPerAccess, + /// Support residual tile + bool EnableResidualAccess = false +> +class PredicatedVectorAccessIterator; + +//////////////////////////////////////////////////////////////////////////////// + +/// Vector access iterator specialized for vectors, e.g. 
scale and bias +/// Thread arrangements are for TensorOps +/// +template < + typename Shape_, + typename WarpShape_, + typename Element_, + int ElementsPerAccess, + bool EnableResidualAccess +> +class PredicatedVectorAccessIterator < + Shape_, + WarpShape_, + Element_, + layout::PitchLinear, + ElementsPerAccess, + EnableResidualAccess +> { + public: + + using Shape = Shape_; + using WarpShape = WarpShape_; + using Element = Element_; + using Layout = layout::PitchLinear; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using ConstPointer = const Element *; + using NonConstPointer = typename platform::remove_const::type *; + +// static int const kElementsPerAccess = 128 / sizeof_bits::value; + static int const kElementsPerAccess = ElementsPerAccess; + static int const kThreads = 32; + static int const kRowsPerIteration = 8; + static int const kThreadsPerRow = kThreads / kRowsPerIteration; + static int const kThreadsPerRowMask = 0x3; + static int const kIterations = WarpShape::kContiguous / (kThreadsPerRow * kElementsPerAccess); + static int const kWarpCountStrided = Shape::kStrided / WarpShape::kStrided; + + using AccessType = AlignedArray; + + private: + /// Internal pointer type permits fast address arithmetic + using BytePointer = char *; + + private: + // + // Data members + // + + /// Internal pointer to first access of tile + BytePointer pointer_; + + /// Extent of tensor + TensorCoord extent_; + + /// pointer offset of each thread + TensorCoord thread_offset_; + + /// iteration index + LongIndex iteration_; + + /// residual access + bool is_residual_; + + /// residual offset of each thread + TensorCoord residual_offset_; + + public: + /// Constructs a vector access iterator + CUTLASS_HOST_DEVICE + PredicatedVectorAccessIterator( + /// Pointer to the start of the vector + ConstPointer pointer, + /// Extent of vector + TensorCoord extent, + /// ID of each participating thread + int thread_id, + /// ID of each participating warp + int warp_id, + /// Initial offset of threadblock + TensorCoord const &threadblock_offset) + : pointer_(reinterpret_cast( + const_cast(pointer))), + extent_(extent), + is_residual_(false) { + + + int warp_offset = (warp_id / kWarpCountStrided) * WarpShape::kContiguous; + + // Per-thread offset in logical coordinates of tensor + + thread_offset_ = threadblock_offset + TensorCoord(warp_offset, 0) + + TensorCoord((thread_id & kThreadsPerRowMask) * kElementsPerAccess, 0); + + set_iteration_index(0); + + if(EnableResidualAccess) { + // compute residual offset + typename TensorCoord::Index residual_size = extent_.contiguous() % WarpShape::kContiguous; + if (residual_size) { + is_residual_ = true; + residual_offset_ = make_Coord(residual_size, 0); + } + } + } + + /// Construct a PredicatedVectorAccessIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + PredicatedVectorAccessIterator( + /// Pointer to start of vector + ConstPointer pointer, + /// Extent of vector + TensorCoord extent, + ///< ID of each participating thread + int thread_id, + /// ID of each participating warp + int warp_id) + : PredicatedVectorAccessIterator(pointer, extent, thread_id, warp_id, + make_Coord(0, 0)) {} + + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { + iteration_ = index; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole 
tiles + CUTLASS_DEVICE + void add_tile_offset( + TensorCoord const &tile_offset) { + + thread_offset_ = + thread_offset_ + + TensorCoord(WarpShape::kContiguous * tile_offset.contiguous(), 0); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + + return reinterpret_cast( + pointer_ + + ((thread_offset_.contiguous() + iteration_ * kThreadsPerRow * kElementsPerAccess) + * sizeof_bits::value / 8)); + } + + /// Increment and return an instance to self. + CUTLASS_HOST_DEVICE + PredicatedVectorAccessIterator &operator++() { + ++iteration_; + if(iteration_ >= kIterations) + iteration_ = 0; + + return *this; + } + + /// Increment and return an instance to self. + CUTLASS_HOST_DEVICE + void advance() { + if(EnableResidualAccess && is_residual_) { + is_residual_ = false; + thread_offset_ += residual_offset_; + } + else + add_tile_offset(TensorCoord(1, 0)); + } + + /// Increment and return an instance to self. + CUTLASS_HOST_DEVICE + PredicatedVectorAccessIterator operator++(int) { + PredicatedVectorAccessIterator self(*this); + operator++(); + return self; + } + + /// Returns whether access is valid or not + CUTLASS_HOST_DEVICE + bool valid() { + return ((thread_offset_.contiguous() + + iteration_ * kThreadsPerRow * kElementsPerAccess) < extent_.contiguous()); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedVectorAccessIterator for row-major data. +/// +template < + typename Shape_, + typename WarpShape_, + typename Element_, + int ElementsPerAccess, + bool EnableResidualAccess +> +class PredicatedVectorAccessIterator< + Shape_, + WarpShape_, + Element_, + layout::RowMajor, + ElementsPerAccess, + EnableResidualAccess +> { + public: + + using Shape = Shape_; + using WarpShape = WarpShape_; + using Element = Element_; + using Layout = layout::RowMajor; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using ConstPointer = const Element *; + using NonConstPointer = typename platform::remove_const::type *; + + using UnderlyingIterator = PredicatedVectorAccessIterator< + layout::PitchLinearShape, + layout::PitchLinearShape, + Element, + layout::PitchLinear, + ElementsPerAccess, + EnableResidualAccess>; + + using AccessType = typename UnderlyingIterator::AccessType; + static int const kElementsPerAccess = UnderlyingIterator::kElementsPerAccess; + static int const kRowsPerIteration = UnderlyingIterator::kRowsPerIteration; + static int const kThreads = UnderlyingIterator::kThreads; + static int const kIterations = UnderlyingIterator::kIterations; + + private: + // + // Data members + // + + /// Underlying pitch-linear tile iterator + UnderlyingIterator iterator_; + + public: + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + PredicatedVectorAccessIterator( + ///< Pointer to the start of the vector + ConstPointer pointer, + ///< Extent of tensor + TensorCoord extent, + ///< ID of each participating thread + int thread_id, + ///< ID of each participating warp + int warp_id, + ///< Initial offset of threadblock + TensorCoord const &threadblock_offset) + : iterator_(pointer, layout::PitchLinearCoord(extent.column(), extent.row()), + thread_id, warp_id, + layout::PitchLinearCoord(threadblock_offset.column(), + threadblock_offset.row())) {} + + /// Construct a 
PredicatedVectorAccessIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + PredicatedVectorAccessIterator( + ConstPointer pointer, ///< Pointer to the start of the vector + TensorCoord extent, ///< Extent of tensor + int thread_id, ///< ID of each participating thread + int warp_id ///< ID of each participating warp + ) + : PredicatedVectorAccessIterator(pointer, extent, thread_id, warp_id, + make_Coord(0, 0)) {} + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { iterator_.set_iteration_index(index); } + + /// Advances an iterator along logical dimensions of matrix in units of whole + /// tiles + CUTLASS_HOST_DEVICE + void add_tile_offset(TensorCoord const &tile_offset) { + iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()}); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + return reinterpret_cast(iterator_.get()); + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedVectorAccessIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedVectorAccessIterator operator++(int) { + PredicatedVectorAccessIterator self(*this); + operator++(); + return self; + } + + /// Increment and return an instance to self. + CUTLASS_HOST_DEVICE + void advance() { + iterator_.advance(); + } + + /// Returns whether access is valid or not + CUTLASS_HOST_DEVICE + bool valid() { + return iterator_.valid(); + } +}; + + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace transform +} // namespace cutlass + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_scale_bias_vector_access_iterator.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_scale_bias_vector_access_iterator.h new file mode 100644 index 0000000000000000000000000000000000000000..1de3e652b668aa99f1864a5235cad3d2a5c9a00f --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_scale_bias_vector_access_iterator.h @@ -0,0 +1,253 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief Templates implementing computing the addresses of storing of small + scale and bias vectors in the shared memory. +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/array.h" +#include "cutlass/layout/pitch_linear.h" +#include "cutlass/layout/matrix.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/matrix_shape.h" +#include "cutlass/tensor_ref.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace transform { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +/// RegularScaleBiasVectorAccessIterator +/// +template +class RegularScaleBiasVectorAccessIterator; + +//////////////////////////////////////////////////////////////////////////////// + +/// Tile iterator specialized for congruous arrangements for TensorOps +/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template +class RegularScaleBiasVectorAccessIterator { + public: + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::PitchLinear; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + /// Element type per access + static int const kElementsPerAccess = 128 / sizeof_bits::value; + static int const kThreads = Shape::kContiguous / kElementsPerAccess; + using AccessType = Array; + + private: + // + // Data members + // + + /// Internal pointer + AccessType *pointer_; + + /// Internal byte offset + Index byte_offset_; + + public: + /// Construct a TileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + RegularScaleBiasVectorAccessIterator( + TensorRef scale_bias_ref, ///< Pointer to the start of the scale and bias + ///< vector + int thread_id ///< ID of each participating thread + ) + : byte_offset_(0) { + // Per-thread offset in logical coordinates of tensor + int thread_offset = thread_id * kElementsPerAccess; + + // initialize pointer + pointer_ = + reinterpret_cast(scale_bias_ref.data() + thread_offset); + + set_iteration_index(0); + } + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) {} + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void 
add_pointer_offset(LongIndex pointer_offset) { + byte_offset_ += pointer_offset * sizeof(Element); + } + + /// Returns a pointer + CUTLASS_DEVICE + AccessType *get() const { + + char *access_byte_ptr = + reinterpret_cast(pointer_); + + return reinterpret_cast(access_byte_ptr + byte_offset_); + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularScaleBiasVectorAccessIterator &operator++() { return *this; } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularScaleBiasVectorAccessIterator operator++(int) { + RegularScaleBiasVectorAccessIterator prev(*this); + this->operator++(); + + return prev; + } + + /// Adds a tile offset in the unit of tile. + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + // Multiply by 2 because we store scale and bias belong to the same stage + // next to each other. + add_pointer_offset(coord.contiguous() * Shape::kContiguous * 2); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Tile iterator specialized for row major layouts +/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template +class RegularScaleBiasVectorAccessIterator< + Shape_, Element_, + layout::RowMajor> { + public: + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::RowMajor; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + /// Underlying iterator type + using UnderlyingIterator = RegularScaleBiasVectorAccessIterator< + layout::PitchLinearShape, Element, + layout::PitchLinear>; + + using AccessType = typename UnderlyingIterator::AccessType; + + private: + + /// Underlying iterator + UnderlyingIterator iterator_; + + public: + /// Construct a TileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + RegularScaleBiasVectorAccessIterator( + TensorRef scale_bias_ref, ///< Pointer to the start of the scale and bias + ///< vector + int thread_id ///< ID of each participating thread + ) + : iterator_({scale_bias_ref.data(), scale_bias_ref.stride()}, thread_id) { + } + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { iterator_.set_iteration_index(index); } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + return reinterpret_cast(iterator_.get()); + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + iterator_.add_tile_offset({coord.column(), coord.row()}); + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularScaleBiasVectorAccessIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. 
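// Illustrative sketch (not part of the CUTLASS sources): add_tile_offset() above multiplies
// by two because each stage stores the scale vector and the bias vector back to back, so
// advancing one stage skips 2 * Shape::kContiguous elements. The constants below (16-bit
// elements, 128-bit accesses, Shape::kContiguous = 64) are assumptions chosen only to make
// the arithmetic concrete.

#include <cstdio>

int main() {
  const int kElementBits = 16;                         // e.g. half-precision scale/bias
  const int kElementsPerAccess = 128 / kElementBits;   // matches 128 / sizeof_bits<Element>
  const int kShapeContiguous = 64;                     // Shape::kContiguous (example)
  const int kThreads = kShapeContiguous / kElementsPerAccess;

  // Moving one stage ahead skips the scale vector plus the bias vector of that stage,
  // i.e. 2 * Shape::kContiguous elements (the "* 2" in add_tile_offset above).
  for (int stage = 0; stage < 3; ++stage) {
    long element_offset = long(stage) * kShapeContiguous * 2;
    std::printf("stage %d: starts at element %ld (%d threads, %d elems/access)\n",
                stage, element_offset, kThreads, kElementsPerAccess);
  }
  return 0;
}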
+ CUTLASS_HOST_DEVICE + RegularScaleBiasVectorAccessIterator operator++(int) { + RegularScaleBiasVectorAccessIterator prev(*this); + ++iterator_; + + return prev; + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace transform +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_tile_access_iterator.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_tile_access_iterator.h new file mode 100644 index 0000000000000000000000000000000000000000..a3e30c2fff5a6ac3b8e882002e7ab9b6b0710e0b --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_tile_access_iterator.h @@ -0,0 +1,58 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Templates implementing the address computation of storing of tiles + from pitch-linear rank=2 tensors. 
+*/ + +#pragma once + +#include "cutlass/cutlass.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace transform { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +template ::value* ThreadMap::kElementsPerAccess / 8> +class RegularTileAccessIterator; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace transform +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear.h new file mode 100644 index 0000000000000000000000000000000000000000..bba9f66fa476140d0efc3a4182ef96be3101eef0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear.h @@ -0,0 +1,408 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Templates implementing computing the addresses of storing of tiles + from pitch-linear rank=2 tensors. 
+*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/array.h" +#include "cutlass/layout/pitch_linear.h" +#include "cutlass/layout/matrix.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/matrix_shape.h" +#include "cutlass/tensor_ref.h" + +#include "cutlass/transform/threadblock/regular_tile_access_iterator.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace transform { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +/// Tile iterator specialized for congruous arrangements for TensorOps +/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template +class RegularTileAccessIterator< + Shape_, Element_, + layout::PitchLinear, + AdvanceRank, ThreadMap_, Alignment> { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::PitchLinear; + static int const kAdvanceRank = AdvanceRank; + static int const kAlignment = Alignment; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + using StrideIndex = typename Layout::Stride::Index; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using ThreadMap = ThreadMap_; + + /// Element type per access + using AccessType = Array; + + private: + // + // Data members + // + + /// Stride value + StrideIndex stride_; + + /// Internal pointer to first access of tile + AccessType *pointer_; + + /// Internal byte offset + Index byte_offset_; + + /// Iteration in the contiguous dimension + int iteration_contiguous_; + + /// Iteration in the strided dimension + int iteration_strided_; + + public: + /// Construct a TileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor + int thread_id ///< ID of each participating thread + ) + : stride_(ref.stride(0) / ThreadMap::kElementsPerAccess), + byte_offset_(0) { + + layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id); + + // initialize pointer + pointer_ = reinterpret_cast(ref.data() + ref.offset(thread_offset_base)); + + set_iteration_index(0); + } + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { + iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous; + iteration_strided_ = index / ThreadMap::Iterations::kContiguous; + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + byte_offset_ += pointer_offset * sizeof(Element); + } + + /// Returns a pointer + CUTLASS_DEVICE + AccessType *get() const { + + AccessType *access_ptr = pointer_; + + int access_offset = iteration_strided_ * ThreadMap::Delta::kStrided * stride_ + + iteration_contiguous_ * ThreadMap::Delta::kContiguous / + ThreadMap::kElementsPerAccess; + + char *access_byte_ptr = + reinterpret_cast(access_ptr + access_offset); + + return reinterpret_cast(access_byte_ptr + byte_offset_); + } + + /// Advances to the next tile in memory. 
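// Illustrative sketch (not part of the CUTLASS sources): get() above works in units of
// AccessType (vectors of ThreadMap::kElementsPerAccess elements), so the tensor stride is
// kept as stride(0) / kElementsPerAccess and the contiguous delta is likewise divided by
// kElementsPerAccess. The program below redoes that arithmetic with made-up ThreadMap-style
// constants and checks that it matches plain element-wise indexing.

#include <cstdio>

int main() {
  const int kElementsPerAccess = 8;    // illustrative
  const int kDeltaContiguous  = 32;    // ThreadMap::Delta::kContiguous, in elements (illustrative)
  const int kDeltaStrided     = 4;     // ThreadMap::Delta::kStrided, in rows (illustrative)
  const int kStrideElements   = 128;   // ref.stride(0), in elements (illustrative)

  // The iterator keeps the stride in AccessType units, as in its constructor.
  const int stride_in_accesses = kStrideElements / kElementsPerAccess;

  for (int s = 0; s < 2; ++s)
    for (int c = 0; c < 2; ++c) {
      // Offset in AccessType units, following the expression in get() above.
      int access_offset = s * kDeltaStrided * stride_in_accesses +
                          c * kDeltaContiguous / kElementsPerAccess;

      // The same offset expressed directly in elements, for comparison.
      int element_offset = s * kDeltaStrided * kStrideElements + c * kDeltaContiguous;

      std::printf("s=%d c=%d: %d accesses = %d elements (direct: %d)\n",
                  s, c, access_offset, access_offset * kElementsPerAccess, element_offset);
    }
  return 0;
}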
+ CUTLASS_HOST_DEVICE + RegularTileAccessIterator &operator++() { + ++iteration_contiguous_; + + if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) + return *this; + + // Enter here only if (iteration_contiguous_ == + // ThreadMap::Iteration::kContiguous) + iteration_contiguous_ = 0; + ++iteration_strided_; + + if (iteration_strided_ < ThreadMap::Iterations::kStrided) { + return *this; + } + + // Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided) + // which means we enter the next tile. + iteration_strided_ = 0; + + return *this; + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileAccessIterator operator++(int) { + RegularTileAccessIterator prev(*this); + this->operator++(); + + return prev; + } + + /// Adds a tile offset in the unit of tile. + /// In GEMM/Conv implementation, this is used to move in the k dimension in the shared memory. + /// Below layouts are the shared memory layouts. Current SM50 SIMT kernels only use col major A and row major B. + /// For row major A operand, k dimension is contiguous dimension; + /// For col major A operand, k dimension is strided dimension; + /// For row major B operand, k dimension is strided dimension; + /// For col major B operand, k dimension is contiguous dimension. + /// Below two classes map col/row major to the pitch linear coordinates used + /// in this base class. + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + add_pointer_offset(coord.contiguous() * Shape::kContiguous + + coord.strided() * Shape::kStrided * stride_ * + ThreadMap::kElementsPerAccess); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Tile iterator specialized for column major layouts +/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template +class RegularTileAccessIterator< + Shape_, Element_, + layout::ColumnMajor, + AdvanceRank, ThreadMap_, Alignment> { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::ColumnMajor; + static int const kAdvanceRank = AdvanceRank; + static int const kAlignment = Alignment; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using ThreadMap = ThreadMap_; + + /// Underlying iterator type + using UnderlyingIterator = RegularTileAccessIterator< + layout::PitchLinearShape, Element, + layout::PitchLinear, + (kAdvanceRank == 0 ? 
0 : 1), + ThreadMap_>; + + using AccessType = typename UnderlyingIterator::AccessType; + + private: + + /// Underlying iterator + UnderlyingIterator iterator_; + + public: + /// Construct a TileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor + int thread_id ///< ID of each participating thread + ) + : iterator_({ref.data(), ref.stride()}, thread_id) {} + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { iterator_.set_iteration_index(index); } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + return reinterpret_cast(iterator_.get()); + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + iterator_.add_tile_offset({coord.row(), coord.column()}); + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileAccessIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileAccessIterator operator++(int) { + RegularTileAccessIterator prev(*this); + ++iterator_; + + return prev; + } +}; + + +//////////////////////////////////////////////////////////////////////////////// + +/// Tile iterator specialized for row major layouts +/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template +class RegularTileAccessIterator< + Shape_, Element_, + layout::RowMajor, + AdvanceRank, ThreadMap_, Alignment> { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::RowMajor; + static int const kAdvanceRank = AdvanceRank; + static int const kAlignment = Alignment; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using ThreadMap = ThreadMap_; + + /// Underlying iterator type + using UnderlyingIterator = RegularTileAccessIterator< + layout::PitchLinearShape, Element, + layout::PitchLinear, + (kAdvanceRank == 0 ? 
1 : 0), + ThreadMap_>; + + using AccessType = typename UnderlyingIterator::AccessType; + + private: + + /// Underlying iterator + UnderlyingIterator iterator_; + + public: + /// Construct a TileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor + int thread_id ///< ID of each participating thread + ) + : iterator_({ref.data(), ref.stride()}, thread_id) {} + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { iterator_.set_iteration_index(index); } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + return reinterpret_cast(iterator_.get()); + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + iterator_.add_tile_offset({coord.column(), coord.row()}); + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileAccessIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileAccessIterator operator++(int) { + RegularTileAccessIterator prev(*this); + ++iterator_; + + return prev; + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace transform +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear_direct_conv.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear_direct_conv.h new file mode 100644 index 0000000000000000000000000000000000000000..938b419163a4082e70e8a933fb518be8e58db5d2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear_direct_conv.h @@ -0,0 +1,587 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Templates implementing computing the addresses of storing of tiles + from pitch-linear rank=2 tensors. +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/array.h" +#include "cutlass/layout/pitch_linear.h" +#include "cutlass/layout/matrix.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/matrix_shape.h" +#include "cutlass/tensor_ref.h" + +#include "cutlass/transform/threadblock/regular_tile_access_iterator.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace transform { +namespace threadblock { + + +//////////////////////////////////////////////////////////////////////////////// + +template ::value* ThreadMap::kElementsPerAccess / 8 + > +class RegularTileAccessIteratorDirectConv; + +//////////////////////////////////////////////////////////////////////////////// + +/// Tile iterator specialized for congruous arrangements for TensorOps with dynamic_iterations OFF +/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template +class RegularTileAccessIteratorDirectConv< + Shape_, Element_, + layout::PitchLinear, + AdvanceRank, ThreadMap_, false, Alignment> { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::PitchLinear; + static int const kAdvanceRank = AdvanceRank; + static int const kAlignment = Alignment; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + using StrideIndex = typename Layout::Stride::Index; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using ThreadMap = ThreadMap_; + + /// Element type per access + using AccessType = Array; + + private: + // + // Data members + // + + /// Stride value + StrideIndex stride_; + + /// Internal pointer to first access of tile + AccessType *pointer_; + + /// Internal byte offset + Index byte_offset_; + + /// Iteration in the contiguous dimension + int iteration_contiguous_; + + /// Iteration in the strided dimension + int iteration_strided_; + + public: + /// Construct a TileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + RegularTileAccessIteratorDirectConv(TensorRef ref, ///< Pointer to start of tensor + int thread_id ///< ID of each participating thread + ) + : stride_(ref.stride(0) / ThreadMap::kElementsPerAccess), + byte_offset_(0) { + + layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id); + + // initialize pointer + pointer_ = reinterpret_cast(ref.data() + ref.offset(thread_offset_base)); + + set_iteration_index(0); + } + + /// Overrides the internal 
iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { + iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous; + iteration_strided_ = index / ThreadMap::Iterations::kContiguous; + } + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_num(int num) { + //Do nothing + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + byte_offset_ += pointer_offset * sizeof(Element); + } + + /// Returns a pointer + CUTLASS_DEVICE + AccessType *get() const { + + AccessType *access_ptr = pointer_; + + int access_offset = iteration_strided_ * ThreadMap::Delta::kStrided * stride_ + + iteration_contiguous_ * ThreadMap::Delta::kContiguous / + ThreadMap::kElementsPerAccess; + + char *access_byte_ptr = + reinterpret_cast(access_ptr + access_offset); + + return reinterpret_cast(access_byte_ptr + byte_offset_); + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileAccessIteratorDirectConv &operator++() { + ++iteration_contiguous_; + + if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) + return *this; + + // Enter here only if (iteration_contiguous_ == + // ThreadMap::Iteration::kContiguous) + iteration_contiguous_ = 0; + ++iteration_strided_; + + if (iteration_strided_ < ThreadMap::Iterations::kStrided) { + return *this; + } + + // Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided) + // which means we enter the next tile. + iteration_strided_ = 0; + + return *this; + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileAccessIteratorDirectConv operator++(int) { + RegularTileAccessIteratorDirectConv prev(*this); + this->operator++(); + + return prev; + } + + /// Adds a tile offset in the unit of tile. 
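+  /// The contiguous coordinate advances by whole tiles of Shape::kContiguous elements; the strided
+  /// coordinate is converted from tiles to elements via the thread map's strided iteration count and
+  /// delta, the tensor stride, and the per-access element count.
+  /// Illustrative only (values assumed, not taken from any particular ThreadMap): with
+  /// Shape::kContiguous = 64, Iterations::kStrided = 4, Delta::kStrided = 8, stride_ = 16 and
+  /// kElementsPerAccess = 8, add_tile_offset({1, 1}) adds 64 + 4 * 8 * 16 * 8 = 4160 elements.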
+ CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + add_pointer_offset(coord.contiguous() * Shape::kContiguous + + coord.strided() * ThreadMap::Iterations::kStrided * + ThreadMap::Delta::kStrided * stride_ * ThreadMap::kElementsPerAccess); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Tile iterator specialized for congruous arrangements for TensorOps with dynamic_iterations ON +/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template +class RegularTileAccessIteratorDirectConv< + Shape_, Element_, + layout::PitchLinear, + AdvanceRank, ThreadMap_,true, Alignment> { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::PitchLinear; + static int const kAdvanceRank = AdvanceRank; + static int const kAlignment = Alignment; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + using StrideIndex = typename Layout::Stride::Index; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using ThreadMap = ThreadMap_; + + /// Element type per access + using AccessType = Array; + + private: + // + // Data members + // + + /// Stride value + StrideIndex stride_; + + /// Internal pointer to first access of tile + AccessType *pointer_; + + /// Internal byte offset + Index byte_offset_; + + /// Iteration in the contiguous dimension + int iteration_contiguous_; + + /// Iteration in the strided dimension + int iteration_strided_; + + /// Total iterattions in the strided dimension: Dynamic value + int total_iteration_strided_; + + public: + /// Construct a TileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + RegularTileAccessIteratorDirectConv(TensorRef ref, ///< Pointer to start of tensor + int thread_id ///< ID of each participating thread + ) + : stride_(ref.stride(0) / ThreadMap::kElementsPerAccess), + byte_offset_(0) { + + layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id); + + // initialize pointer + pointer_ = reinterpret_cast(ref.data() + ref.offset(thread_offset_base)); + + set_iteration_index(0); + } + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { + iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous; + iteration_strided_ = index / ThreadMap::Iterations::kContiguous; + } + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_num(int num) { + total_iteration_strided_ = num; + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + byte_offset_ += pointer_offset * sizeof(Element); + } + + /// Returns a pointer + CUTLASS_DEVICE + AccessType *get() const { + + AccessType *access_ptr = pointer_; + + int access_offset = iteration_strided_ * ThreadMap::Delta::kStrided * stride_ + + iteration_contiguous_ * ThreadMap::Delta::kContiguous / + ThreadMap::kElementsPerAccess; + + char *access_byte_ptr = + reinterpret_cast(access_ptr + access_offset); + + return reinterpret_cast(access_byte_ptr + byte_offset_); + } + + /// Advances to the next tile in memory. 
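+  /// Iteration advances fastest along the contiguous dimension; the strided loop bound is the
+  /// dynamic count supplied through set_iteration_num() rather than ThreadMap::Iterations::kStrided.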
+ CUTLASS_HOST_DEVICE + RegularTileAccessIteratorDirectConv &operator++() { + ++iteration_contiguous_; + + if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) + return *this; + + // Enter here only if (iteration_contiguous_ == + // ThreadMap::Iteration::kContiguous) + iteration_contiguous_ = 0; + ++iteration_strided_; + + if (iteration_strided_ < total_iteration_strided_) { + return *this; + } + + // Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided) + // which means we enter the next tile. + iteration_strided_ = 0; + + return *this; + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileAccessIteratorDirectConv operator++(int) { + RegularTileAccessIteratorDirectConv prev(*this); + this->operator++(); + + return prev; + } + + /// Adds a tile offset in the unit of tile. + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + add_pointer_offset(coord.contiguous() * Shape::kContiguous + + coord.strided() * total_iteration_strided_ * ThreadMap::Delta::kStrided * stride_ * + ThreadMap::kElementsPerAccess); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Tile iterator specialized for column major layouts +/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template +class RegularTileAccessIteratorDirectConv< + Shape_, Element_, + layout::ColumnMajor, + AdvanceRank, ThreadMap_, Dynamic_iterations , Alignment> { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::ColumnMajor; + static int const kAdvanceRank = AdvanceRank; + static int const kAlignment = Alignment; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using ThreadMap = ThreadMap_; + + /// Underlying iterator type + using UnderlyingIterator = RegularTileAccessIteratorDirectConv< + layout::PitchLinearShape, Element, + layout::PitchLinear, + (kAdvanceRank == 0 ? 
0 : 1), + ThreadMap_, + Dynamic_iterations>; + + using AccessType = typename UnderlyingIterator::AccessType; + + private: + + /// Underlying iterator + UnderlyingIterator iterator_; + + public: + /// Construct a TileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + RegularTileAccessIteratorDirectConv(TensorRef ref, ///< Pointer to start of tensor + int thread_id ///< ID of each participating thread + ) + : iterator_({ref.data(), ref.stride()}, thread_id) {} + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { iterator_.set_iteration_index(index); } + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_num(int num) { + iterator_.set_iteration_num(num); + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + return reinterpret_cast(iterator_.get()); + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + iterator_.add_tile_offset({coord.row(), coord.column()}); + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileAccessIteratorDirectConv &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileAccessIteratorDirectConv operator++(int) { + RegularTileAccessIteratorDirectConv prev(*this); + ++iterator_; + + return prev; + } +}; + + +//////////////////////////////////////////////////////////////////////////////// + +/// Tile iterator specialized for row major layouts +/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template +class RegularTileAccessIteratorDirectConv< + Shape_, Element_, + layout::RowMajor, + AdvanceRank, ThreadMap_, Dynamic_iterations, Alignment> { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::RowMajor; + static int const kAdvanceRank = AdvanceRank; + static int const kAlignment = Alignment; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using ThreadMap = ThreadMap_; + + /// Underlying iterator type + using UnderlyingIterator = RegularTileAccessIteratorDirectConv< + layout::PitchLinearShape, Element, + layout::PitchLinear, + (kAdvanceRank == 0 ? 
1 : 0), + ThreadMap_, + Dynamic_iterations>; + + using AccessType = typename UnderlyingIterator::AccessType; + + private: + + /// Underlying iterator + UnderlyingIterator iterator_; + + public: + /// Construct a TileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + RegularTileAccessIteratorDirectConv(TensorRef ref, ///< Pointer to start of tensor + int thread_id ///< ID of each participating thread + ) + : iterator_({ref.data(), ref.stride()}, thread_id) {} + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { iterator_.set_iteration_index(index); } + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_num(int num) { + iterator_.set_iteration_num(num); + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + return reinterpret_cast(iterator_.get()); + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + iterator_.add_tile_offset({coord.column(), coord.row()}); + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileAccessIteratorDirectConv &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileAccessIteratorDirectConv operator++(int) { + RegularTileAccessIteratorDirectConv prev(*this); + ++iterator_; + + return prev; + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace transform +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op.h new file mode 100644 index 0000000000000000000000000000000000000000..c16daff080868b4016cca4443fb5c519cbdb05d7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op.h @@ -0,0 +1,820 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Templates implementing computing the addresses of storing of tiles + from pitch-linear rank=2 tensors. +*/ + +#pragma once + +#include "cutlass/array.h" +#include "cutlass/cutlass.h" +#include "cutlass/layout/pitch_linear.h" +#include "cutlass/layout/tensor_op_multiplicand_sm75.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/matrix_shape.h" +#include "cutlass/tensor_ref.h" +#include "cutlass/transform/threadblock/regular_tile_access_iterator.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace transform { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +/// Tile iterator specialized for congruous arrangements for TensorOps +/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template +class RegularTileAccessIterator< + Shape_, Element_, + layout::TensorOpMultiplicandCongruous::value, + int(128 / sizeof(Element_))>, + AdvanceRank, ThreadMap_, Alignment> { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = + layout::TensorOpMultiplicandCongruous::value, + int(128 / sizeof(Element_))>; + static int const kAdvanceRank = AdvanceRank; + static int const kAlignment = Alignment; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + using StrideIndex = typename Layout::Stride::Index; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using ThreadMap = ThreadMap_; + + /// Internal details made public to facilitate introspection + struct Detail { + /// This iterator is specialized for an access size that is 128 bits in + /// length. + static int const kAccessSizeInBits = 128; + + static_assert(sizeof_bits::value * + ThreadMap::kElementsPerAccess == + kAccessSizeInBits, + "This iterator requires a policy whose access size is 128bs"); + + ///< Number of pointers + static int const kPointerCount = + (ThreadMap::Iterations::kStrided > 1 ? 
2 : 1); + }; + + /// Element type per access + using AccessType = Array; + + private: + // + // Data members + // + + /// Stride value + StrideIndex stride_; + + /// Internal pointer to first access of tile + AccessType *pointer_[Detail::kPointerCount]; + + /// Internal byte offset + Index byte_offset_; + + /// Iteration in the contiguous dimension + int iteration_contiguous_; + + /// Iteration in the strided dimension + int iteration_strided_; + + public: + /// Construct a TileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor + int thread_id ///< ID of each participating thread + ) + : stride_(ref.stride(0) / Layout::kElementsPerAccess), + byte_offset_(0) { + layout::PitchLinearCoord thread_offset_base = + ThreadMap::initial_offset(thread_id); + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < Detail::kPointerCount; ++i) { + // This is the offset of a thread within a threadblock tile for a specific + // pointer (units of elements) + layout::PitchLinearCoord thread_offset_in_threadblock_tile = + thread_offset_base + + layout::PitchLinearCoord{ + 0, ThreadMap::Detail::WarpThreadArrangement::kStrided * i}; + + // initialize pointer + pointer_[i] = reinterpret_cast( + ref.data() + ref.offset(thread_offset_in_threadblock_tile)); + } + + set_iteration_index(0); + } + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { + iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous; + iteration_strided_ = index / ThreadMap::Iterations::kContiguous; + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + byte_offset_ += pointer_offset * sizeof(Element); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + AccessType *access_ptr = pointer_[iteration_strided_ & 1]; + int stride_idx = (iteration_strided_ & ~1); + + int access_offset = stride_idx * ThreadMap::Delta::kStrided * stride_ + + iteration_contiguous_ * ThreadMap::Delta::kContiguous / + ThreadMap::kElementsPerAccess; + + char *access_byte_ptr = + reinterpret_cast(access_ptr + access_offset); + return reinterpret_cast(access_byte_ptr + byte_offset_); + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileAccessIterator &operator++() { + ++iteration_contiguous_; + + if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) + return *this; + + // Enter here only if (iteration_contiguous_ == + // ThreadMap::Iteration::kContiguous) + iteration_contiguous_ = 0; + ++iteration_strided_; + + if (iteration_strided_ < ThreadMap::Iterations::kStrided) { + return *this; + } + + // Enter here only if (iteration_strided_ == ThreadMap::Iteration::kStrided) + // which means we enter the next tile. + iteration_strided_ = 0; + + return *this; + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileAccessIterator operator++(int) { + RegularTileAccessIterator prev(*this); + this->operator++(); + + return prev; + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + add_pointer_offset(coord.contiguous() * Shape::kContiguous + + coord.strided() * Shape::kStrided * stride_ * + Layout::kElementsPerAccess); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Tile Iterator specialized for column-major congruous TensorOp formats. 
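+/// Maps logical (row, column) coordinates onto the underlying pitch-linear congruous iterator,
+/// with rows forming the contiguous dimension and columns the strided dimension.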
+/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template +class RegularTileAccessIterator< + Shape_, Element_, + layout::ColumnMajorTensorOpMultiplicandCongruous< + sizeof_bits::value, int(128 / sizeof(Element_))>, + AdvanceRank, ThreadMap_, Alignment> { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for column-major iterator may along advance along the " + "columns(rank=0) or rows(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::ColumnMajorTensorOpMultiplicandCongruous< + sizeof_bits::value, int(128 / sizeof(Element_))>; + static int const kAdvanceRank = AdvanceRank; + static int const kAlignment = Alignment; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using ThreadMap = ThreadMap_; + + /// Underlying iterator type + using UnderlyingIterator = RegularTileAccessIterator< + layout::PitchLinearShape, Element, + layout::TensorOpMultiplicandCongruous::value, + int(128 / sizeof(Element_))>, + (kAdvanceRank == 0 ? 0 : 1), ThreadMap_>; + + using AccessType = typename UnderlyingIterator::AccessType; + + private: + /// Underlying iterator + UnderlyingIterator iterator_; + + public: + /// Construct a TileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor + int thread_id ///< ID of each participating thread + ) + : iterator_({ref.data(), ref.stride()}, thread_id) {} + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { iterator_.set_iteration_index(index); } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + return reinterpret_cast(iterator_.get()); + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + iterator_.add_tile_offset({coord.row(), coord.column()}); + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileAccessIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileAccessIterator operator++(int) { + RegularTileAccessIterator prev(*this); + ++iterator_; + + return prev; + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Tile Iterator specialized for row-major congruous TensorOp formats. 
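+/// The transpose of the column-major case: columns form the contiguous dimension of the underlying
+/// pitch-linear congruous iterator, so tile offsets are forwarded as (column, row).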
+/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template +class RegularTileAccessIterator< + Shape_, Element_, + layout::RowMajorTensorOpMultiplicandCongruous::value, + int(128 / sizeof(Element_))>, + AdvanceRank, ThreadMap_, Alignment> { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for row-major iterator may along advance along the " + "columns(rank=0) or rows(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::RowMajorTensorOpMultiplicandCongruous< + sizeof_bits::value, int(128 / sizeof(Element_))>; + static int const kAdvanceRank = AdvanceRank; + static int const kAlignment = Alignment; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using ThreadMap = ThreadMap_; + + /// Underlying iterator type + using UnderlyingIterator = RegularTileAccessIterator< + layout::PitchLinearShape, Element, + layout::TensorOpMultiplicandCongruous::value, + int(128 / sizeof(Element_))>, + (kAdvanceRank == 0 ? 1 : 0), ThreadMap_>; + + using AccessType = typename UnderlyingIterator::AccessType; + + private: + /// Underlying iterator + UnderlyingIterator iterator_; + + public: + /// Construct a TileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor + int thread_id ///< ID of each participating thread + ) + : iterator_({ref.data(), ref.stride()}, thread_id) {} + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { iterator_.set_iteration_index(index); } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + return reinterpret_cast(iterator_.get()); + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + iterator_.add_tile_offset({coord.column(), coord.row()}); + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileAccessIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. 
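+  /// Post-increment form: copies the current state, advances the underlying iterator, and returns
+  /// the copy.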
+ CUTLASS_HOST_DEVICE + RegularTileAccessIterator operator++(int) { + RegularTileAccessIterator prev(*this); + ++iterator_; + + return prev; + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Tile iterator specialized for crosswise arrangements for TensorOps +/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template +class RegularTileAccessIterator::value, Crosswise>, + AdvanceRank, ThreadMap_, Alignment> { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = + layout::TensorOpMultiplicandCrosswise::value, + Crosswise>; + static int const kAdvanceRank = AdvanceRank; + static int const kAlignment = Alignment; + static int const kCrosswise = Crosswise; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + using StrideIndex = typename Layout::Stride::Index; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using ThreadMap = ThreadMap_; + + static_assert(!(ThreadMap::Delta::kContiguous % kCrosswise), + "kCrosswise is the smallest unit in the contiguous dimension " + "for shared memory swizzling."); + + /// Internal details made public to facilitate introspection + struct Detail { + /// This iterator is specialized for an access size that is 128 bits in + /// length. + static int const kAccessSizeInBits = 128; + + static_assert(sizeof_bits::value * + ThreadMap::kElementsPerAccess == + kAccessSizeInBits, + "This iterator requires a policy whose access size is 128bs"); + + /// Number of pointers + /// + /// Note:TN kblock32 layouts only needs 1 pointer, but strangely + /// reducing pointer count hurts perfomrnace + static int const kPointerCount = + (ThreadMap::Iterations::kStrided > 1 ? 2 : 1); + }; + + /// Element type per access + using AccessType = Array; + + private: + // + // Data members + // + + /// Total number of sections. The memory is divided into stages. One stage + /// can store one tile. Stage is divided into sections. Interleaved layout + /// can have multiple sections in a stage. The rest layout only has one section + /// in a stage. 
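+  /// Non-interleaved layouts therefore have exactly one section per stage; sections_ is initialized
+  /// below as ref.stride(0) / kCrosswise.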
+ int sections_; + + /// Sections that a stage has + int sections_per_stage_; + + /// Stride value + StrideIndex stride_; + + /// Internal pointer to first access of tile + AccessType *pointer_[Detail::kPointerCount]; + + /// Internal byte offset + Index byte_offset_; + + /// Iteration in the contiguous dimension + int iteration_contiguous_; + + /// Iteration in the strided dimension + int iteration_strided_; + + public: + /// Construct a TileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor + int thread_id ///< ID of each participating thread + ) + : sections_(ref.stride(0) / kCrosswise), + sections_per_stage_(Shape::kContiguous / kCrosswise), + // stride_ = kCrosswise x sections_ x kFactor + stride_(ref.stride(0) * Layout::kFactor / Layout::kElementsPerAccess), + byte_offset_(0) { + layout::PitchLinearCoord thread_offset_base = + ThreadMap::initial_offset(thread_id); + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < Detail::kPointerCount; ++i) { + // This is the offset of a thread within a threadblock tile for a specific + // pointer (units of elements) + layout::PitchLinearCoord thread_offset_in_threadblock_tile = + thread_offset_base + + layout::PitchLinearCoord{ + 0, ThreadMap::Detail::WarpThreadArrangement::kStrided * i}; + // initialize pointer + pointer_[i] = reinterpret_cast(ref.data()) + + ref.offset(thread_offset_in_threadblock_tile) / + Layout::kElementsPerAccess; + } + + set_iteration_index(0); + } + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { + iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous; + iteration_strided_ = index / ThreadMap::Iterations::kContiguous; + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + byte_offset_ += pointer_offset * sizeof_bits::value / 8; + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + AccessType *access_ptr = pointer_[iteration_strided_ & 1]; + int stride_idx = (iteration_strided_ & ~1); + + int access_offset = + stride_idx * ThreadMap::Delta::kStrided * stride_ / Layout::kFactor + + // kCrosswise elements in the contiguous dimension would span to a + // shared memory cache line. + iteration_contiguous_ * (ThreadMap::Delta::kContiguous / kCrosswise) * + Layout::TileShape::kContiguous; + char *access_byte_ptr = + reinterpret_cast(access_ptr + access_offset); + return reinterpret_cast(access_byte_ptr + byte_offset_); + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileAccessIterator &operator++() { + ++iteration_contiguous_; + + if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) + return *this; + + // Enter here only if (iteration_contiguous_ == + // ThreadMap::Iteration::kContiguous) + iteration_contiguous_ = 0; + ++iteration_strided_; + + if (iteration_strided_ < ThreadMap::Iterations::kStrided) { + return *this; + } + + // Enter here only if (iteration_strided_ == ThreadMap::Iteration::kStrided) + // which means we enter the next section. + iteration_strided_ = 0; + + return *this; + } + + /// Advances to the next tile in memory. 
+ CUTLASS_HOST_DEVICE + RegularTileAccessIterator operator++(int) { + RegularTileAccessIterator prev(*this); + this->operator++(); + + return prev; + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + add_pointer_offset(coord.contiguous() * sections_per_stage_ * stride_ * + ThreadMap::kElementsPerAccess / sections_ + + coord.strided() * Shape::kStrided * stride_ * + Layout::kElementsPerAccess / Layout::kFactor); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Tile Iterator specialized for column-major crosswise TensorOp formats. +/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template +class RegularTileAccessIterator< + Shape_, Element_, + layout::ColumnMajorTensorOpMultiplicandCrosswise< + sizeof_bits::value, Crosswise>, + AdvanceRank, ThreadMap_, Alignment> { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for column-major iterator may along advance along the " + "columns(rank=0) or rows(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::ColumnMajorTensorOpMultiplicandCrosswise< + sizeof_bits::value, Crosswise>; + static int const kAdvanceRank = AdvanceRank; + static int const kAlignment = Alignment; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using ThreadMap = ThreadMap_; + + /// Underlying iterator type + using UnderlyingIterator = RegularTileAccessIterator< + layout::PitchLinearShape, Element, + layout::TensorOpMultiplicandCrosswise::value, + Crosswise>, + (kAdvanceRank == 0 ? 0 : 1), ThreadMap_>; + + using AccessType = typename UnderlyingIterator::AccessType; + + private: + /// Underlying iterator + UnderlyingIterator iterator_; + + public: + /// Construct a TileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor + int thread_id ///< ID of each participating thread + ) + : iterator_({ref.data(), ref.stride()}, thread_id) {} + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { iterator_.set_iteration_index(index); } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + return reinterpret_cast(iterator_.get()); + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + iterator_.add_tile_offset({coord.row(), coord.column()}); + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileAccessIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileAccessIterator operator++(int) { + RegularTileAccessIterator prev(*this); + ++iterator_; + + return prev; + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Tile Iterator specialized for row-major crosswise TensorOp formats. 
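+/// Wraps the pitch-linear crosswise iterator for row-major tensors: columns map to the contiguous
+/// dimension and rows to the strided dimension, while the crosswise shared-memory swizzling itself
+/// is performed by the underlying pitch-linear specialization.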
+/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template +class RegularTileAccessIterator::value, Crosswise>, + AdvanceRank, ThreadMap_, Alignment> { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for row-major iterator may along advance along the " + "columns(rank=0) or rows(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::RowMajorTensorOpMultiplicandCrosswise< + sizeof_bits::value, Crosswise>; + static int const kAdvanceRank = AdvanceRank; + static int const kAlignment = Alignment; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using ThreadMap = ThreadMap_; + + /// Underlying iterator type + using UnderlyingIterator = RegularTileAccessIterator< + layout::PitchLinearShape, Element, + layout::TensorOpMultiplicandCrosswise::value, + Crosswise>, + (kAdvanceRank == 0 ? 1 : 0), ThreadMap_>; + + using AccessType = typename UnderlyingIterator::AccessType; + + private: + /// Underlying iterator + UnderlyingIterator iterator_; + + public: + /// Construct a TileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor + int thread_id ///< ID of each participating thread + ) + : iterator_({ref.data(), ref.stride()}, thread_id) {} + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { iterator_.set_iteration_index(index); } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + return reinterpret_cast(iterator_.get()); + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + iterator_.add_tile_offset({coord.column(), coord.row()}); + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileAccessIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileAccessIterator operator++(int) { + RegularTileAccessIterator prev(*this); + ++iterator_; + + return prev; + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace transform +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op_sm80.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op_sm80.h new file mode 100644 index 0000000000000000000000000000000000000000..2b116d0b810b2f30963f4c7b4de0ab9e347c8162 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op_sm80.h @@ -0,0 +1,1532 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Templates implementing computing the addresses of storing of tiles + from pitch-linear rank=2 tensors. 
+*/ + +#pragma once + +#include "cutlass/array.h" +#include "cutlass/cutlass.h" +#include "cutlass/layout/pitch_linear.h" +#include "cutlass/layout/tensor_op_multiplicand_sm75.h" +#include "cutlass/layout/tensor_op_multiplicand_sm80.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/matrix_shape.h" +#include "cutlass/tensor_ref.h" +#include "cutlass/transform/threadblock/regular_tile_access_iterator.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace transform { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +/// Tile iterator specialized for congruous arrangements for TensorOps +/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template +class RegularTileAccessIterator< + Shape_, Element_, + layout::TensorOpMultiplicandCongruous64b, + AdvanceRank, ThreadMap_, Alignment> { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::TensorOpMultiplicandCongruous64b; + static int const kAdvanceRank = AdvanceRank; + static int const kAlignment = Alignment; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + using StrideIndex = typename Layout::Stride::Index; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using ThreadMap = ThreadMap_; + + static_assert(ThreadMap::kThreads / 32 > 1, + "This tile iterator requires at least two warps."); + + /// Internal details made public to facilitate introspection + struct Detail { + /// This iterator is specialized for an access size that is 128 bits in + /// length. 
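+    /// (For the 64b congruous layout the access size is in fact 64 bits, as encoded by
+    /// kAccessSizeInBits and checked by the static_assert below.)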
+ static int const kAccessSizeInBits = 64; + + static_assert(sizeof_bits::value * + ThreadMap::kElementsPerAccess == + kAccessSizeInBits, + "This iterator requires a policy whose access size is 64b"); + + ///< Number of pointers + static int const kPointerCount = 1; + }; + + /// Element type per access + using AccessType = Array; + + private: + // + // Data members + // + + /// Stride value + StrideIndex stride_; + + /// Internal pointer to first access of tile + AccessType *pointer_; + + /// Internal byte offset + Index byte_offset_; + + /// Iteration in the contiguous dimension + int iteration_contiguous_; + + /// Iteration in the strided dimension + int iteration_strided_; + + public: + + /// Construct a TileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + RegularTileAccessIterator( + TensorRef ref, ///< Pointer to start of tensor + int thread_id ///< ID of each participating thread + ): + stride_(ref.stride(0) / Layout::kElementsPerAccess), + byte_offset_(0) { + + layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id); + + // This is the offset of a thread within a threadblock tile for a specific + // pointer (units of elements) + layout::PitchLinearCoord thread_offset_in_threadblock_tile = thread_offset_base; + + // initialize pointer + pointer_ = reinterpret_cast(ref.data() + ref.offset(thread_offset_in_threadblock_tile)); + + set_iteration_index(0); + } + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { + + iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous; + iteration_strided_ = index / ThreadMap::Iterations::kContiguous; + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + + byte_offset_ += pointer_offset * sizeof(Element); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + + AccessType *access_ptr = pointer_; + + int access_offset = iteration_strided_ * ThreadMap::Delta::kStrided * stride_ + + iteration_contiguous_ * ThreadMap::Delta::kContiguous / + ThreadMap::kElementsPerAccess; + + char *access_byte_ptr = + reinterpret_cast(access_ptr + access_offset); + + return reinterpret_cast(access_byte_ptr + byte_offset_); + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileAccessIterator &operator++() { + ++iteration_contiguous_; + + if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) + return *this; + + // Enter here only if (iteration_contiguous_ == + // ThreadMap::Iteration::kContiguous) + iteration_contiguous_ = 0; + ++iteration_strided_; + + if (iteration_strided_ < ThreadMap::Iterations::kStrided) { + return *this; + } + + // Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided) + // which means we enter the next tile. + iteration_strided_ = 0; + + return *this; + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileAccessIterator operator++(int) { + + RegularTileAccessIterator prev(*this); + + this->operator++(); + + return prev; + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + + add_pointer_offset( + coord.contiguous() * Shape::kContiguous + + coord.strided() * Shape::kStrided * stride_ * Layout::kElementsPerAccess); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Tile Iterator specialized for column-major congruous TensorOp formats. 
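+/// Adapts the pitch-linear TensorOpMultiplicandCongruous64b iterator to column-major tensors;
+/// rows map to the contiguous dimension and columns to the strided dimension.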
+/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template +class RegularTileAccessIterator< + Shape_, Element_, + layout::ColumnMajorTensorOpMultiplicandCongruous64b, + AdvanceRank, ThreadMap_, Alignment> { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for column-major iterator may along advance along the " + "columns(rank=0) or rows(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::ColumnMajorTensorOpMultiplicandCongruous64b; + static int const kAdvanceRank = AdvanceRank; + static int const kAlignment = Alignment; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using ThreadMap = ThreadMap_; + + /// Underlying iterator type + using UnderlyingIterator = RegularTileAccessIterator< + layout::PitchLinearShape, Element, + layout::TensorOpMultiplicandCongruous64b, + (kAdvanceRank == 0 ? 0 : 1), ThreadMap_>; + + using AccessType = typename UnderlyingIterator::AccessType; + + private: + /// Underlying iterator + UnderlyingIterator iterator_; + + public: + /// Construct a TileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor + int thread_id ///< ID of each participating thread + ) + : iterator_({ref.data(), ref.stride()}, thread_id) {} + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { iterator_.set_iteration_index(index); } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + return reinterpret_cast(iterator_.get()); + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + iterator_.add_tile_offset({coord.row(), coord.column()}); + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileAccessIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileAccessIterator operator++(int) { + RegularTileAccessIterator prev(*this); + ++iterator_; + + return prev; + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Tile Iterator specialized for row-major congruous TensorOp formats. 
+/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template +class RegularTileAccessIterator { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for row-major iterator may along advance along the " + "columns(rank=0) or rows(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::RowMajorTensorOpMultiplicandCongruous64b; + static int const kAdvanceRank = AdvanceRank; + static int const kAlignment = Alignment; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using ThreadMap = ThreadMap_; + + /// Underlying iterator type + using UnderlyingIterator = RegularTileAccessIterator< + layout::PitchLinearShape, Element, + layout::TensorOpMultiplicandCongruous64b, + (kAdvanceRank == 0 ? 1 : 0), ThreadMap_>; + + using AccessType = typename UnderlyingIterator::AccessType; + + private: + /// Underlying iterator + UnderlyingIterator iterator_; + + public: + /// Construct a TileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor + int thread_id ///< ID of each participating thread + ) + : iterator_({ref.data(), ref.stride()}, thread_id) {} + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { iterator_.set_iteration_index(index); } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + return reinterpret_cast(iterator_.get()); + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + iterator_.add_tile_offset({coord.column(), coord.row()}); + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileAccessIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. 
+ CUTLASS_HOST_DEVICE + RegularTileAccessIterator operator++(int) { + RegularTileAccessIterator prev(*this); + ++iterator_; + + return prev; + } +}; + +//////////////////////////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////////////////////////// + +/// Tile iterator specialized for crosswise arrangements for TensorOps +/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template +class RegularTileAccessIterator< + Shape_, Element_, + layout::TensorOpMultiplicand64bCrosswise, + AdvanceRank, ThreadMap_, Alignment> { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::TensorOpMultiplicand64bCrosswise; + static int const kAdvanceRank = AdvanceRank; + static int const kAlignment = Alignment; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + using StrideIndex = typename Layout::Stride::Index; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using ThreadMap = ThreadMap_; + + static_assert(ThreadMap::kThreads / 32 > 1, + "This tile iterator requires at least two warps."); + + /// Internal details made public to facilitate introspection + struct Detail { + /// This iterator is specialized for an access size that is 128 bits in + /// length. + static int const kAccessSizeInBits = 64; + + static_assert(sizeof_bits::value * + ThreadMap::kElementsPerAccess == + kAccessSizeInBits, + "This iterator requires a policy whose access size is 64b"); + + ///< Number of pointers - two pointers are needed if making more than 4 iterations along + ///< strided dimension + static int const kPointerCount = (ThreadMap::Iterations::kStrided > 4 ? 
2 : 1); + }; + + /// Element type per access + using AccessType = Array; + + private: + // + // Data members + // + + /// Stride value + StrideIndex stride_; + + /// Internal pointer to first access of tile + AccessType *pointer_; + + /// Internal byte offset + Index byte_offset_[Detail::kPointerCount]; + + /// Iteration in the contiguous dimension + int iteration_contiguous_; + + /// Iteration in the strided dimension + int iteration_strided_; + + public: + + /// Construct a TileIterator with zero threadblock offset + CUTLASS_DEVICE + RegularTileAccessIterator( + TensorRef ref, ///< Pointer to start of tensor + int thread_id ///< ID of each participating thread + ): + stride_(ref.stride(0) / ThreadMap::kElementsPerAccess) { + + layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id); + + // This is the offset of a thread within a threadblock tile for a specific + // pointer (units of elements) + layout::PitchLinearCoord thread_offset_in_threadblock_tile = thread_offset_base; + + // initialize pointer + pointer_ = reinterpret_cast(ref.data()); + + byte_offset_[0] = ref.offset(thread_offset_in_threadblock_tile) * sizeof(Element); + + if (Detail::kPointerCount == 2) { + byte_offset_[1] = byte_offset_[0] ^ 8; + } + + set_iteration_index(0); + } + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { + + iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous; + iteration_strided_ = index / ThreadMap::Iterations::kContiguous; + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + + pointer_ += pointer_offset / ThreadMap::kElementsPerAccess; + } + + /// Returns a pointer + CUTLASS_DEVICE + AccessType *get() const { + + // Map the logical contiguous and strided access to the internal swizzled structure. + int uniform_offset = (iteration_strided_ & 0x3) * stride_ + (iteration_strided_ >> 3) * 16 + stride_ * ThreadMap::Delta::kContiguous * iteration_contiguous_; + + char *access_byte_ptr = reinterpret_cast(pointer_ + uniform_offset); + + int byte_offset; + + // This iterator may require two byte offsets if it must load more than 8 rows (or 2 iterations) + // in the strided dimension + if (Detail::kPointerCount == 2 && (iteration_strided_ & 0x4)) { + byte_offset = byte_offset_[1]; + } + else { + byte_offset = byte_offset_[0]; + } + + return reinterpret_cast(access_byte_ptr + byte_offset); + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileAccessIterator &operator++() { + ++iteration_contiguous_; + + if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) + return *this; + + // Enter here only if (iteration_contiguous_ == + // ThreadMap::Iteration::kContiguous) + iteration_contiguous_ = 0; + ++iteration_strided_; + + if (iteration_strided_ < ThreadMap::Iterations::kStrided) { + return *this; + } + + // Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided) + // which means we enter the next tile. + iteration_strided_ = 0; + + return *this; + } + + /// Advances to the next tile in memory. 
+ CUTLASS_HOST_DEVICE + RegularTileAccessIterator operator++(int) { + + RegularTileAccessIterator prev(*this); + + this->operator++(); + + return prev; + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + + add_pointer_offset(coord.strided() * Shape::kStrided + coord.contiguous() * Shape::kContiguous * stride_); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Tile Iterator specialized for column-major crosswise TensorOp formats. +/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template +class RegularTileAccessIterator< + Shape_, Element_, + layout::ColumnMajorTensorOpMultiplicand64bCrosswise, + AdvanceRank, ThreadMap_, Alignment> { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for column-major iterator may along advance along the " + "columns(rank=0) or rows(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::ColumnMajorTensorOpMultiplicand64bCrosswise; + static int const kAdvanceRank = AdvanceRank; + static int const kAlignment = Alignment; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using ThreadMap = ThreadMap_; + + /// Underlying iterator type + using UnderlyingIterator = RegularTileAccessIterator< + layout::PitchLinearShape, Element, + layout::TensorOpMultiplicand64bCrosswise, + (kAdvanceRank == 0 ? 0 : 1), ThreadMap_>; + + using AccessType = typename UnderlyingIterator::AccessType; + + private: + /// Underlying iterator + UnderlyingIterator iterator_; + + public: + /// Construct a TileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor + int thread_id ///< ID of each participating thread + ) + : iterator_({ref.data(), ref.stride()}, thread_id) {} + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { iterator_.set_iteration_index(index); } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + return reinterpret_cast(iterator_.get()); + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + iterator_.add_tile_offset({coord.row(), coord.column()}); + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileAccessIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileAccessIterator operator++(int) { + RegularTileAccessIterator prev(*this); + ++iterator_; + + return prev; + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Tile Iterator specialized for row-major crosswise TensorOp formats. 
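+/// This adapter wraps the pitch-linear TensorOpMultiplicand64bCrosswise iterator and simply
+/// swaps coordinates: columns map to the contiguous dimension and rows to the strided
+/// dimension, so add_tile_offset({r, c}) on the adapter becomes add_tile_offset({c, r}) on the
+/// underlying iterator.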
+/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template +class RegularTileAccessIterator { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for row-major iterator may along advance along the " + "columns(rank=0) or rows(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::RowMajorTensorOpMultiplicand64bCrosswise; + static int const kAdvanceRank = AdvanceRank; + static int const kAlignment = Alignment; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using ThreadMap = ThreadMap_; + + /// Underlying iterator type + using UnderlyingIterator = RegularTileAccessIterator< + layout::PitchLinearShape, Element, + layout::TensorOpMultiplicand64bCrosswise, + (kAdvanceRank == 0 ? 1 : 0), ThreadMap_>; + + using AccessType = typename UnderlyingIterator::AccessType; + + private: + /// Underlying iterator + UnderlyingIterator iterator_; + + public: + /// Construct a TileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor + int thread_id ///< ID of each participating thread + ) + : iterator_({ref.data(), ref.stride()}, thread_id) {} + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { iterator_.set_iteration_index(index); } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + return reinterpret_cast(iterator_.get()); + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + iterator_.add_tile_offset({coord.column(), coord.row()}); + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileAccessIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. 
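+  /// The postfix form returns the adapter by value; the copy is cheap because the adapter's
+  /// only state is the underlying pitch-linear iterator.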
+ CUTLASS_HOST_DEVICE + RegularTileAccessIterator operator++(int) { + RegularTileAccessIterator prev(*this); + ++iterator_; + + return prev; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Tile iterator specialized for congruous arrangements for TensorOps +/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template +class RegularTileAccessIterator< + Shape_, Element_, + layout::TensorOpMultiplicandCongruous128b, + AdvanceRank, ThreadMap_, Alignment> { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::TensorOpMultiplicandCongruous128b; + static int const kAdvanceRank = AdvanceRank; + static int const kAlignment = Alignment; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + using StrideIndex = typename Layout::Stride::Index; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using ThreadMap = ThreadMap_; + + static_assert(ThreadMap::kThreads / 32 > 1, + "This tile iterator requires at least two warps."); + + /// Internal details made public to facilitate introspection + struct Detail { + /// This iterator is specialized for an access size that is 128 bits in + /// length. + static int const kAccessSizeInBits = 128; + + static_assert(sizeof_bits::value * + ThreadMap::kElementsPerAccess == + kAccessSizeInBits, + "This iterator requires a policy whose access size is 128b"); + + ///< Number of pointers + static int const kPointerCount = 1; + }; + + /// Element type per access + using AccessType = Array; + + private: + // + // Data members + // + + /// Stride value + StrideIndex stride_; + + /// Internal pointer to first access of tile + AccessType *pointer_; + + /// Internal byte offset + Index byte_offset_; + + /// Iteration in the contiguous dimension + int iteration_contiguous_; + + /// Iteration in the strided dimension + int iteration_strided_; + + public: + + /// Construct a TileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + RegularTileAccessIterator( + TensorRef ref, ///< Pointer to start of tensor + int thread_id ///< ID of each participating thread + ): + stride_(ref.stride(0) / Layout::kElementsPerAccess), + byte_offset_(0) { + + layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id); + + // This is the offset of a thread within a threadblock tile for a specific + // pointer (units of elements) + layout::PitchLinearCoord thread_offset_in_threadblock_tile = thread_offset_base; + + // initialize pointer + pointer_ = reinterpret_cast(ref.data() + ref.offset(thread_offset_in_threadblock_tile)); + + set_iteration_index(0); + } + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { + + iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous; + iteration_strided_ = index / ThreadMap::Iterations::kContiguous; + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + + byte_offset_ += pointer_offset * sizeof(Element); + } + + /// Returns a pointer + 
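+  /// The returned address is pointer_ advanced by
+  ///
+  ///   access_offset = iteration_strided_ * Delta::kStrided * stride_
+  ///                 + iteration_contiguous_ * Delta::kContiguous / kElementsPerAccess
+  ///
+  /// AccessType elements, plus byte_offset_ bytes. For example (illustrative numbers only),
+  /// with Delta::kStrided == 8, stride_ == 4 and iteration index (c, s) == (0, 1), the access
+  /// lands 32 AccessType elements past the thread's base pointer.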
CUTLASS_HOST_DEVICE + AccessType *get() const { + + AccessType *access_ptr = pointer_; + + int access_offset = iteration_strided_ * ThreadMap::Delta::kStrided * stride_ + + iteration_contiguous_ * ThreadMap::Delta::kContiguous / + ThreadMap::kElementsPerAccess; + + char *access_byte_ptr = + reinterpret_cast(access_ptr + access_offset); + + return reinterpret_cast(access_byte_ptr + byte_offset_); + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileAccessIterator &operator++() { + ++iteration_contiguous_; + + if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) + return *this; + + // Enter here only if (iteration_contiguous_ == + // ThreadMap::Iteration::kContiguous) + iteration_contiguous_ = 0; + ++iteration_strided_; + + if (iteration_strided_ < ThreadMap::Iterations::kStrided) { + return *this; + } + + // Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided) + // which means we enter the next tile. + iteration_strided_ = 0; + + return *this; + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileAccessIterator operator++(int) { + + RegularTileAccessIterator prev(*this); + + this->operator++(); + + return prev; + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + + add_pointer_offset( + coord.contiguous() * Shape::kContiguous + + coord.strided() * Shape::kStrided * stride_ * Layout::kElementsPerAccess); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Tile Iterator specialized for column-major congruous TensorOp formats. +/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template +class RegularTileAccessIterator< + Shape_, Element_, + layout::ColumnMajorTensorOpMultiplicandCongruous128b, + AdvanceRank, ThreadMap_, Alignment> { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for column-major iterator may along advance along the " + "columns(rank=0) or rows(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::ColumnMajorTensorOpMultiplicandCongruous128b; + static int const kAdvanceRank = AdvanceRank; + static int const kAlignment = Alignment; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using ThreadMap = ThreadMap_; + + /// Underlying iterator type + using UnderlyingIterator = RegularTileAccessIterator< + layout::PitchLinearShape, Element, + layout::TensorOpMultiplicandCongruous128b, + (kAdvanceRank == 0 ? 
0 : 1), ThreadMap_>; + + using AccessType = typename UnderlyingIterator::AccessType; + + private: + /// Underlying iterator + UnderlyingIterator iterator_; + + public: + /// Construct a TileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor + int thread_id ///< ID of each participating thread + ) + : iterator_({ref.data(), ref.stride()}, thread_id) {} + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { iterator_.set_iteration_index(index); } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + return reinterpret_cast(iterator_.get()); + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + iterator_.add_tile_offset({coord.row(), coord.column()}); + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileAccessIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileAccessIterator operator++(int) { + RegularTileAccessIterator prev(*this); + ++iterator_; + + return prev; + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Tile Iterator specialized for row-major congruous TensorOp formats. +/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template +class RegularTileAccessIterator { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for row-major iterator may along advance along the " + "columns(rank=0) or rows(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::RowMajorTensorOpMultiplicandCongruous128b; + static int const kAdvanceRank = AdvanceRank; + static int const kAlignment = Alignment; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using ThreadMap = ThreadMap_; + + /// Underlying iterator type + using UnderlyingIterator = RegularTileAccessIterator< + layout::PitchLinearShape, Element, + layout::TensorOpMultiplicandCongruous128b, + (kAdvanceRank == 0 ? 
1 : 0), ThreadMap_>; + + using AccessType = typename UnderlyingIterator::AccessType; + + private: + /// Underlying iterator + UnderlyingIterator iterator_; + + public: + /// Construct a TileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + RegularTileAccessIterator( + TensorRef ref, ///< Pointer to start of tensor + int thread_id ///< ID of each participating thread + ): + iterator_({ref.data(), ref.stride()}, thread_id) {} + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { iterator_.set_iteration_index(index); } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + return reinterpret_cast(iterator_.get()); + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + iterator_.add_tile_offset({coord.column(), coord.row()}); + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileAccessIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileAccessIterator operator++(int) { + RegularTileAccessIterator prev(*this); + ++iterator_; + + return prev; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Tile iterator specialized for congruous arrangements for TensorOps +/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template +class RegularTileAccessIterator< + Shape_, Element_, + layout::TensorOpMultiplicandCrosswise128x4, + AdvanceRank, ThreadMap_, Alignment> { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::TensorOpMultiplicandCrosswise128x4; + static int const kAdvanceRank = AdvanceRank; + static int const kAlignment = Alignment; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + using StrideIndex = typename Layout::Stride::Index; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using ThreadMap = ThreadMap_; + + static_assert(ThreadMap::kThreads / 32 > 1, + "This tile iterator requires at least two warps."); + + /// Internal details made public to facilitate introspection + struct Detail { + /// This iterator is specialized for an access size that is 128 bits in + /// length. 
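+    /// For instance, a 64-bit element type with ThreadMap::kElementsPerAccess == 2 meets this
+    /// requirement (2 x 64b == 128b); any other combination must satisfy the static_assert below.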
+ static int const kAccessSizeInBits = 128; + + static_assert(sizeof_bits::value * + ThreadMap::kElementsPerAccess == + kAccessSizeInBits, + "This iterator requires a policy whose access size is 128b"); + + ///< Number of pointers + static int const kPointerCount = 1; + }; + + + static_assert(!(ThreadMap::Iterations::kStrided % 2), "This iterator requires at least two iterations along the strided dimension"); + + /// Element type per access + using AccessType = Array; + + private: + // + // Data members + // + + /// Stride value + StrideIndex stride_; + + /// Internal pointer to first access of tile + AccessType *pointer_; + + /// Internal byte offset + Index byte_offset_; + + /// Iteration in the contiguous dimension + int iteration_contiguous_; + + /// Iteration in the strided dimension + int iteration_strided_; + + public: + + /// Construct a TileIterator with zero threadblock offset + CUTLASS_DEVICE + RegularTileAccessIterator( + TensorRef ref, ///< Pointer to start of tensor + int thread_id ///< ID of each participating thread + ): + stride_(ref.stride(0) / Layout::kElementsPerAccess), + byte_offset_(0) { + + layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id); + + // This is the offset of a thread within a threadblock tile for a specific + // pointer (units of elements) + layout::PitchLinearCoord thread_offset_in_threadblock_tile = thread_offset_base; + + // initialize pointer + pointer_ = reinterpret_cast(ref.data() + ref.offset(thread_offset_in_threadblock_tile)); + + set_iteration_index(0); + } + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { + + iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous; + iteration_strided_ = index / ThreadMap::Iterations::kContiguous; + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + + byte_offset_ += pointer_offset * sizeof(Element); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + + AccessType *access_ptr = pointer_; + + int offset_c = (iteration_contiguous_ * ThreadMap::Delta::kContiguous + (iteration_strided_ & 1) * 2); + int offset_s = (iteration_strided_ / 2) * 8; + + int access_offset = offset_c * stride_ + offset_s; + + char *access_byte_ptr = + reinterpret_cast(access_ptr + access_offset); + + return reinterpret_cast(access_byte_ptr + byte_offset_); + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileAccessIterator &operator++() { + ++iteration_contiguous_; + + if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) + return *this; + + // Enter here only if (iteration_contiguous_ == + // ThreadMap::Iteration::kContiguous) + iteration_contiguous_ = 0; + ++iteration_strided_; + + if (iteration_strided_ < ThreadMap::Iterations::kStrided) { + return *this; + } + + // Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided) + // which means we enter the next tile. + iteration_strided_ = 0; + + return *this; + } + + /// Advances to the next tile in memory. 
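+  /// Iteration order is contiguous-fastest: each increment advances iteration_contiguous_ and,
+  /// once ThreadMap::Iterations::kContiguous accesses have been issued, resets it and advances
+  /// iteration_strided_; after the final strided iteration both indices wrap back to zero. This
+  /// matches the linear index used by set_iteration_index, i.e.
+  /// index == iteration_contiguous_ + iteration_strided_ * ThreadMap::Iterations::kContiguous.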
+ CUTLASS_HOST_DEVICE + RegularTileAccessIterator operator++(int) { + + RegularTileAccessIterator prev(*this); + + this->operator++(); + + return prev; + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + + add_pointer_offset( + coord.contiguous() * Shape::kContiguous * stride_ + + coord.strided() * Shape::kStrided * Layout::kElementsPerAccess); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Tile Iterator specialized for column-major congruous TensorOp formats. +/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template +class RegularTileAccessIterator< + Shape_, Element_, + layout::ColumnMajorTensorOpMultiplicandCrosswise128x4, + AdvanceRank, ThreadMap_, Alignment> { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for column-major iterator may along advance along the " + "columns(rank=0) or rows(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::ColumnMajorTensorOpMultiplicandCrosswise128x4; + static int const kAdvanceRank = AdvanceRank; + static int const kAlignment = Alignment; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using ThreadMap = ThreadMap_; + + /// Underlying iterator type + using UnderlyingIterator = RegularTileAccessIterator< + layout::PitchLinearShape, Element, + layout::TensorOpMultiplicandCrosswise128x4, + (kAdvanceRank == 0 ? 0 : 1), ThreadMap_>; + + using AccessType = typename UnderlyingIterator::AccessType; + + private: + /// Underlying iterator + UnderlyingIterator iterator_; + + public: + /// Construct a TileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor + int thread_id ///< ID of each participating thread + ) + : iterator_({ref.data(), ref.stride()}, thread_id) {} + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { iterator_.set_iteration_index(index); } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + return reinterpret_cast(iterator_.get()); + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + iterator_.add_tile_offset({coord.row(), coord.column()}); + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileAccessIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileAccessIterator operator++(int) { + RegularTileAccessIterator prev(*this); + ++iterator_; + + return prev; + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Tile Iterator specialized for row-major congruous TensorOp formats. 
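+/// This specialization wraps the pitch-linear TensorOpMultiplicandCrosswise128x4 iterator for
+/// row-major tensors: row-major (row, column) coordinates are forwarded to the underlying
+/// iterator as the pitch-linear (contiguous, strided) pair (column, row).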
+/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template +class RegularTileAccessIterator { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for row-major iterator may along advance along the " + "columns(rank=0) or rows(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::RowMajorTensorOpMultiplicandCrosswise128x4; + static int const kAdvanceRank = AdvanceRank; + static int const kAlignment = Alignment; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using ThreadMap = ThreadMap_; + + /// Underlying iterator type + using UnderlyingIterator = RegularTileAccessIterator< + layout::PitchLinearShape, Element, + layout::TensorOpMultiplicandCrosswise128x4, + (kAdvanceRank == 0 ? 1 : 0), ThreadMap_>; + + using AccessType = typename UnderlyingIterator::AccessType; + + private: + /// Underlying iterator + UnderlyingIterator iterator_; + + public: + /// Construct a TileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + RegularTileAccessIterator( + TensorRef ref, ///< Pointer to start of tensor + int thread_id ///< ID of each participating thread + ): + iterator_({ref.data(), ref.stride()}, thread_id) {} + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { iterator_.set_iteration_index(index); } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + return reinterpret_cast(iterator_.get()); + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + iterator_.add_tile_offset({coord.column(), coord.row()}); + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileAccessIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileAccessIterator operator++(int) { + RegularTileAccessIterator prev(*this); + ++iterator_; + + return prev; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace transform +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_tile_iterator.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_tile_iterator.h new file mode 100644 index 0000000000000000000000000000000000000000..26d7da7e5d5a54f0e574a9955f88af54895c7509 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_tile_iterator.h @@ -0,0 +1,62 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Templates implementing storing of tiles from pitch-linear rank=2 tensors. +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/numeric_types.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace transform { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +template < + typename Shape, + typename Element, + typename Layout, + int AdvanceRank, + typename ThreadMap, + int Alignment = sizeof_bits::value * ThreadMap::kElementsPerAccess / 8 +> +class RegularTileIterator; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace transform +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_tile_iterator_pitch_linear.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_tile_iterator_pitch_linear.h new file mode 100644 index 0000000000000000000000000000000000000000..27ce2cb417856e655e5d807dacc2e7f2fe9b87f8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_tile_iterator_pitch_linear.h @@ -0,0 +1,552 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Templates implementing loading of tiles from pitch-linear rank=2 tensors. + + This iterator uses masks to guard out-of-bounds accesses and visits the last "residue" tile + first, with the objective of minimizing predicate mask updates during steady-state operation. + + A precomputed "Params" object minimizes the amount of state that must be stored in registers, + and integer addition is used to advance the pointer through memory. +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/tensor_ref.h" +#include "cutlass/layout/matrix.h" +#include "cutlass/layout/pitch_linear.h" + +#include "regular_tile_iterator.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace transform { +namespace threadblock { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Regular tile iterator specialized for pitch-linear. This one is used by 2-stage SIMT kernels +/// and sparse tensor core meta data. 
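+/// It provides unpredicated load() and store() of a whole Fragment per thread. An illustrative
+/// use as a shared-memory staging iterator (not from the original header; `SmemIterator` is a
+/// hypothetical instantiation of this class) might be:
+///
+/// \code
+/// SmemIterator smem_iterator(smem_ref, thread_idx);
+/// typename SmemIterator::Fragment frag;
+/// // ... fill frag, e.g. from a predicated global-memory iterator ...
+/// smem_iterator.store(frag);
+/// ++smem_iterator;   // advance one tile along the advance rank
+/// \endcode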
+template < + typename Shape_, + typename Element_, + int AdvanceRank, + typename ThreadMap_, + int Alignment +> +class RegularTileIterator { +public: + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::PitchLinear; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + static int const kAlignment = Alignment; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + using StrideIndex = typename Layout::Stride::Index; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using Fragment = Array; + + using AccessType = AlignedArray; + + static_assert(kAdvanceRank == 0 || kAdvanceRank == 1, + "Advance rank may only be along the contiguous or strided dimensions."); + +private: + + // + // Types + // + + // + // Data members + // + + /// Pointer to memory + uint8_t *pointer_; + + /// Stride quantity + StrideIndex stride_; + + /// Amount to increment pointer along strided dimension + Index increment_strided_; + + /// Amount to advance pointer between tiles + Index increment_advance_; + +public: + + CUTLASS_DEVICE + RegularTileIterator(): pointer_(nullptr), increment_strided_(0), increment_advance_(0) { } + + CUTLASS_DEVICE + RegularTileIterator( + TensorRef const &ref, + int thread_idx + ): + pointer_(reinterpret_cast(ref.data()) + (ref.offset(ThreadMap::initial_offset(thread_idx)) * sizeof_bits::value / 8)) { + + stride_ = ref.stride()[0]; + increment_strided_ = (ref.stride()[0] * sizeof_bits::value) * ThreadMap::Delta::kStrided / 8; + + increment_advance_ = + (kAdvanceRank == 0 ? + Shape::kContiguous * sizeof_bits::value / 8 : + Shape::kStrided * (ref.stride()[0] * sizeof_bits::value / 8)); + } + + /// Loads a fragment + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + + AccessType *frag_ptr = reinterpret_cast(&frag); + uint8_t const *byte_pointer = pointer_ + pointer_offset * sizeof_bits::value / 8; + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { + + AccessType const *access_ptr = reinterpret_cast(byte_pointer); + + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { + + int idx = c + s * ThreadMap::Iterations::kContiguous; + frag_ptr[idx] = access_ptr[c * ThreadMap::Delta::kContiguous / + ThreadMap::kElementsPerAccess]; + } + + if (s + 1 < ThreadMap::Iterations::kStrided) { + byte_pointer += increment_strided_; + } + } + } + + /// Loads a fragment + CUTLASS_HOST_DEVICE + void load(Fragment &frag, TensorCoord const & tile_offset) { + load_with_pointer_offset( + frag, + tile_offset.contiguous() * Shape::kContiguous / ThreadMap::kElementsPerAccess + + tile_offset.strided() * Shape::kStrided * stride_ + ); + } + + /// Loads a fragment + CUTLASS_HOST_DEVICE + void load(Fragment &frag) { + load_with_pointer_offset(frag, 0); + } + + /// Stores a fragment + CUTLASS_HOST_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { + + AccessType const *frag_ptr = reinterpret_cast(&frag); + uint8_t *byte_pointer = pointer_ + pointer_offset * sizeof_bits::value / 8; + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { + + AccessType *access_ptr = reinterpret_cast(byte_pointer); + + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { + + int idx = c + s * ThreadMap::Iterations::kContiguous; + access_ptr[c * ThreadMap::Delta::kContiguous / + ThreadMap::kElementsPerAccess] = 
frag_ptr[idx]; + } + + if (s + 1 < ThreadMap::Iterations::kStrided) { + byte_pointer += increment_strided_; + } + } + } + + /// Stores a fragment + CUTLASS_HOST_DEVICE + void store(Fragment const &frag, TensorCoord const & tile_offset) { + store_with_pointer_offset( + frag, + tile_offset.contiguous() * Shape::kContiguous + tile_offset.strided() * Shape::kStrided * stride_ + ); + } + + /// Stores a fragment + CUTLASS_HOST_DEVICE + void store(Fragment const &frag) { + store_with_pointer_offset(frag, 0); + } + + /// Advances the pointer + CUTLASS_HOST_DEVICE + RegularTileIterator &operator++() { + pointer_ += increment_advance_; + return *this; + } + + /// Advances the pointer + CUTLASS_HOST_DEVICE + RegularTileIterator &operator--() { + pointer_ -= increment_advance_; + return *this; + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + pointer_ += pointer_offset; + } + + /// Adds a tile offset in the unit of tile. + /// In GEMM/Conv implementation, this is used to move in the k dimension in the shared memory. + /// Below layouts are the shared memory layouts. Current SM50 SIMT kernels only use col major A and row major B. + /// For row major A operand, k dimension is contiguous dimension; + /// For col major A operand, k dimension is strided dimension; + /// For row major B operand, k dimension is strided dimension; + /// For col major B operand, k dimension is contiguous dimension. + /// Below two classes map col/row major to the pitch linear coordinates used + /// in this base class. + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + int offset = sizeof_bits::value * + (coord.contiguous() * Shape::kContiguous + coord.strided() * Shape::kStrided * stride_) / 8; + add_pointer_offset(offset); + } + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { +#if 0 + AccessType *access_ptr = pointer_[iteration_strided_ & 1]; + int stride_idx = (iteration_strided_ & ~1); + + int access_offset = stride_idx * ThreadMap::Delta::kStrided * stride_ + + iteration_contiguous_ * ThreadMap::Delta::kContiguous / + ThreadMap::kElementsPerAccess; + + char *access_byte_ptr = + reinterpret_cast(access_ptr + access_offset); + return reinterpret_cast(access_byte_ptr + byte_offset_); +#endif + return reinterpret_cast(pointer_); + } + +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Regular tile iterator specialized for row major +template < + typename Shape_, + typename Element_, + int AdvanceRank, + typename ThreadMap_, + int Alignment +> +class RegularTileIterator { +public: + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::RowMajor; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + static int const kAlignment = Alignment; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using Fragment = Array; + + using Underlying = RegularTileIterator< + layout::PitchLinearShape, + Element, + layout::PitchLinear, + (kAdvanceRank == 0 ? 
1 : 0), + ThreadMap, + kAlignment + >; + + using AccessType = typename Underlying::AccessType; + + static_assert(kAdvanceRank == 0 || kAdvanceRank == 1, + "Advance rank may only be along the row or column dimensions."); + +private: + + Underlying iterator_; + +public: + + CUTLASS_DEVICE + RegularTileIterator() { } + + CUTLASS_DEVICE + RegularTileIterator( + TensorRef const &ref, + int thread_idx + ): + iterator_({ref.data(), ref.stride()}, thread_idx) { + + } + + /// Loads a fragment + CUTLASS_HOST_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment + CUTLASS_HOST_DEVICE + void load(Fragment &frag, TensorCoord const & tile_offset) { + iterator_.load_with_pointer_offset(frag, {tile_offset.column(), tile_offset.row()}); + } + + /// Loads a fragment + CUTLASS_HOST_DEVICE + void load(Fragment &frag) { + iterator_.load_with_pointer_offset(frag, 0); + } + + /// Stores a fragment + CUTLASS_HOST_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { + iterator_.store_with_pointer_offset(frag, pointer_offset); + } + + /// Stores a fragment + CUTLASS_HOST_DEVICE + void store(Fragment const &frag, TensorCoord const & tile_offset) { + iterator_.store_with_pointer_offset(frag, {tile_offset.column(), tile_offset.row()}); + } + + /// Stores a fragment + CUTLASS_HOST_DEVICE + void store(Fragment const &frag) { + iterator_.store_with_pointer_offset(frag, 0); + } + + /// Advances the pointer + CUTLASS_HOST_DEVICE + RegularTileIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances the pointer + CUTLASS_HOST_DEVICE + RegularTileIterator &operator--() { + --iterator_; + return *this; + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + iterator_.add_tile_offset({coord.column(), coord.row()}); + } + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + return iterator_.get(); + } + +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Regular tile iterator specialized for pitch-linear +template < + typename Shape_, + typename Element_, + int AdvanceRank, + typename ThreadMap_, + int Alignment +> +class RegularTileIterator { +public: + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::ColumnMajor; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + static int const kAlignment = Alignment; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using Fragment = Array; + + using Underlying = RegularTileIterator< + layout::PitchLinearShape, + Element, + layout::PitchLinear, + (kAdvanceRank == 0 ? 
0 : 1), + ThreadMap + >; + + using AccessType = typename Underlying::AccessType; + + static_assert(kAdvanceRank == 0 || kAdvanceRank == 1, + "Advance rank may only be along the row or column dimensions."); + +private: + + Underlying iterator_; + +public: + + CUTLASS_DEVICE + RegularTileIterator() { } + + CUTLASS_DEVICE + RegularTileIterator( + TensorRef const &ref, + int thread_idx + ): + iterator_({ref.data(), ref.stride()}, thread_idx) { + + } + + /// Loads a fragment + CUTLASS_HOST_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment + CUTLASS_HOST_DEVICE + void load(Fragment &frag, TensorCoord const & tile_offset) { + iterator_.load_with_pointer_offset(frag, {tile_offset.row(), tile_offset.column()}); + } + + /// Loads a fragment + CUTLASS_HOST_DEVICE + void load(Fragment &frag) { + iterator_.load_with_pointer_offset(frag, 0); + } + + /// Stores a fragment + CUTLASS_HOST_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { + iterator_.store_with_pointer_offset(frag, pointer_offset); + } + + /// Stores a fragment + CUTLASS_HOST_DEVICE + void store(Fragment const &frag, TensorCoord const & tile_offset) { + iterator_.store_with_pointer_offset(frag, {tile_offset.row(), tile_offset.column()}); + } + + /// Stores a fragment + CUTLASS_HOST_DEVICE + void store(Fragment const &frag) { + iterator_.store_with_pointer_offset(frag, 0); + } + + /// Advances the pointer + CUTLASS_HOST_DEVICE + RegularTileIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances the pointer + CUTLASS_HOST_DEVICE + RegularTileIterator &operator--() { + --iterator_; + return *this; + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + iterator_.add_tile_offset({coord.row(), coord.column()}); + } + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType *get() const { + return iterator_.get(); + } + +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace transform +} // namespace cutlass + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_tile_iterator_pitch_linear_2dthreadtile.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_tile_iterator_pitch_linear_2dthreadtile.h new file mode 100644 index 0000000000000000000000000000000000000000..a954eb46519784b70c316c1405feda5dde79c5f5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_tile_iterator_pitch_linear_2dthreadtile.h @@ -0,0 +1,509 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Templates implementing loading of tiles from pitch-linear rank=2 tensors. + + This iterator uses masks to guard out-of-bounds accesses and visits the last "residue" tile + first, with the objective of minimizing predicate mask updates during steady-state operation. + + A precomputed "Params" object minimizes the amount of state that must be stored in registers, + and integer addition is used to advance the pointer through memory. 
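+
+  For reference, a pitch-linear rank=2 coordinate (contiguous, strided) maps to the linear
+  element offset contiguous + strided * stride(0), so advancing by a whole tile reduces to
+  adding a precomputed byte increment to the pointer.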
+*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/tensor_ref.h" +#include "cutlass/layout/matrix.h" +#include "cutlass/layout/pitch_linear.h" + +#include "regular_tile_iterator.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace transform { +namespace threadblock { + +///////////////////////////////////////////////////////////////////////////////////////////////// +template < + typename Shape, + typename Element, + typename Layout, + int AdvanceRank, + typename ThreadMap, + int Alignment = sizeof_bits::value * ThreadMap::kElementsPerAccess / 8 +> +class RegularTileIterator2dThreadTile; + + +/// Regular tile iterator specialized for pitch-linear + 2d thread-tiled threadmapping +template < + typename Shape_, + typename Element_, + int AdvanceRank, + typename ThreadMap_, + int Alignment +> +class RegularTileIterator2dThreadTile { +public: + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::PitchLinear; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + static int const kAlignment = Alignment; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + using StrideIndex = typename Layout::Stride::Index; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using Fragment = Array; + + static_assert(kAdvanceRank == 0 || kAdvanceRank == 1, + "Advance rank may only be along the contiguous or strided dimensions."); + +private: + + // + // Types + // + + using AccessType = AlignedArray; + + // + // Data members + // + + /// Pointer to memory + uint8_t *pointer_; + + /// Stride quantity + StrideIndex stride_; + + /// Amount to increment pointer along strided dimension + LongIndex increment_strided_; + + /// Amount to advance pointer between tiles + LongIndex increment_advance_; + +public: + + CUTLASS_DEVICE + RegularTileIterator2dThreadTile(): pointer_(nullptr), increment_strided_(0), increment_advance_(0) { } + + CUTLASS_DEVICE + RegularTileIterator2dThreadTile( + TensorRef const &ref, + int thread_idx, + int interleave + ){ + + TensorCoord t = ThreadMap::initial_offset(thread_idx); + long int offset = t[0] * interleave + t[1] * ref.stride()[0]/interleave; + pointer_ = reinterpret_cast(ref.data() + offset); + + stride_ = ref.stride()[0] / interleave; + increment_strided_ = (ref.stride()[0] * sizeof_bits::value / 8) * ThreadMap::Delta::kStrided / interleave; + + increment_advance_ = + (kAdvanceRank == 0 ? 
+ Shape::kContiguous * sizeof_bits::value / 8 : + Shape::kStrided * (ref.stride()[0] * sizeof_bits::value / 8) / interleave); + } + + /// Loads a fragment + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + + AccessType *frag_ptr = reinterpret_cast(&frag); + uint8_t const *byte_pointer = pointer_ + pointer_offset * sizeof_bits::value / 8; + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { + + AccessType const *access_ptr = reinterpret_cast(byte_pointer); + + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { + + int idx = c + s * ThreadMap::Iterations::kContiguous; + frag_ptr[idx] = access_ptr[c * ThreadMap::Delta::kContiguous / ThreadMap::ThreadAccessShape::kStrided]; + } + + if (s + 1 < ThreadMap::Iterations::kStrided) { + byte_pointer += increment_strided_; + } + } + } + + /// Loads a fragment + CUTLASS_HOST_DEVICE + void load(Fragment &frag, TensorCoord const & tile_offset) { + load_with_pointer_offset( + frag, + tile_offset.contiguous() * Shape::kContiguous / ThreadMap::kElementsPerAccess + + tile_offset.strided() * Shape::kStrided * stride_ + ); + } + + /// Loads a fragment + CUTLASS_HOST_DEVICE + void load(Fragment &frag) { + load_with_pointer_offset(frag, 0); + } + + /// Stores a fragment + CUTLASS_HOST_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { + + AccessType const *frag_ptr = reinterpret_cast(&frag); + uint8_t *byte_pointer = pointer_ + pointer_offset * sizeof_bits::value / 8; + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { + + AccessType *access_ptr = reinterpret_cast(byte_pointer); + + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { + + int idx = c + s * ThreadMap::Iterations::kContiguous; + access_ptr[c * ThreadMap::Delta::kContiguous / ThreadMap::ThreadAccessShape::kStrided] = frag_ptr[idx]; + } + + if (s + 1 < ThreadMap::Iterations::kStrided) { + byte_pointer += increment_strided_; + } + } + } + + /// Stores a fragment + CUTLASS_HOST_DEVICE + void store(Fragment const &frag, TensorCoord const & tile_offset) { + store_with_pointer_offset( + frag, + tile_offset.contiguous() * Shape::kContiguous + tile_offset.strided() * Shape::kStrided * stride_ + ); + } + + /// Stores a fragment + CUTLASS_HOST_DEVICE + void store(Fragment const &frag) { + store_with_pointer_offset(frag, 0); + } + + /// Advances the pointer + CUTLASS_HOST_DEVICE + RegularTileIterator2dThreadTile &operator++() { + pointer_ += increment_advance_; + return *this; + } + + /// Advances the pointer + CUTLASS_HOST_DEVICE + RegularTileIterator2dThreadTile &operator--() { + pointer_ -= increment_advance_; + return *this; + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + pointer_ += pointer_offset; + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + int offset = sizeof_bits::value * + (coord.contiguous() * Shape::kContiguous + coord.strided() * Shape::kStrided * stride_) / 8; + add_pointer_offset(offset); + } + +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Regular tile iterator specialized for interleaved layout + 2d thread-tiled threadmapping +template < + typename Shape_, + typename Element_, + int AdvanceRank, + typename ThreadMap_, + int Alignment +> +class RegularTileIterator2dThreadTile, 
AdvanceRank, ThreadMap_, Alignment> { +public: + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::RowMajorInterleaved<4>; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + static int const kAlignment = Alignment; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using Fragment = Array; + + using Underlying = RegularTileIterator2dThreadTile< + layout::PitchLinearShape, + Element, + layout::PitchLinear, + (kAdvanceRank == 0 ? 1 : 0), + ThreadMap, + kAlignment + >; + + static_assert(kAdvanceRank == 0 || kAdvanceRank == 1, + "Advance rank may only be along the row or column dimensions."); + +private: + + Underlying iterator_; + +public: + + CUTLASS_DEVICE + RegularTileIterator2dThreadTile() { } + + CUTLASS_DEVICE + RegularTileIterator2dThreadTile( + TensorRef const &ref, + int thread_idx + ): + iterator_({ref.data(), ref.stride()}, thread_idx, 4) { + + } + + /// Loads a fragment + CUTLASS_HOST_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment + CUTLASS_HOST_DEVICE + void load(Fragment &frag, TensorCoord const & tile_offset) { + iterator_.load_with_pointer_offset(frag, {tile_offset.column(), tile_offset.row()}); + } + + /// Loads a fragment + CUTLASS_HOST_DEVICE + void load(Fragment &frag) { + iterator_.load_with_pointer_offset(frag, 0); + } + + /// Stores a fragment + CUTLASS_HOST_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { + iterator_.store_with_pointer_offset(frag, pointer_offset); + } + + /// Stores a fragment + CUTLASS_HOST_DEVICE + void store(Fragment const &frag, TensorCoord const & tile_offset) { + iterator_.store_with_pointer_offset(frag, {tile_offset.column(), tile_offset.row()}); + } + + /// Stores a fragment + CUTLASS_HOST_DEVICE + void store(Fragment const &frag) { + iterator_.store_with_pointer_offset(frag, 0); + } + + /// Advances the pointer + CUTLASS_HOST_DEVICE + RegularTileIterator2dThreadTile &operator++() { + ++iterator_; + return *this; + } + + /// Advances the pointer + CUTLASS_HOST_DEVICE + RegularTileIterator2dThreadTile &operator--() { + --iterator_; + return *this; + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + iterator_.add_tile_offset({coord.column(), coord.row()}); + } + +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Regular tile iterator specialized for interleaved layout + 2d thread-tiled threadmapping +template < + typename Shape_, + typename Element_, + int AdvanceRank, + typename ThreadMap_, + int Alignment +> +class RegularTileIterator2dThreadTile, AdvanceRank, ThreadMap_, Alignment> { +public: + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::ColumnMajorInterleaved<4>; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + static int const kAlignment = Alignment; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using Fragment = Array; + using 
PitchLinearThreadMap = PitchLinearStripminedThreadMap< layout::PitchLinearShape, + ThreadMap::kThreads, ThreadMap::ThreadAccessShape::kCount >; + + + using Underlying = RegularTileIterator2dThreadTile< + layout::PitchLinearShape, + Element, + layout::PitchLinear, + (kAdvanceRank == 0 ? 0 : 1), + ThreadMap + >; + + static_assert(kAdvanceRank == 0 || kAdvanceRank == 1, + "Advance rank may only be along the row or column dimensions."); + +private: + + Underlying iterator_; + +public: + + CUTLASS_DEVICE + RegularTileIterator2dThreadTile() { } + + CUTLASS_DEVICE + RegularTileIterator2dThreadTile( + TensorRef const &ref, + int thread_idx + ): + iterator_({ref.data(), ref.stride()}, thread_idx, 4) { + + } + + /// Loads a fragment + CUTLASS_HOST_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment + CUTLASS_HOST_DEVICE + void load(Fragment &frag, TensorCoord const & tile_offset) { + iterator_.load_with_pointer_offset(frag, {tile_offset.row(), tile_offset.column()}); + } + + /// Loads a fragment + CUTLASS_HOST_DEVICE + void load(Fragment &frag) { + iterator_.load_with_pointer_offset(frag, 0); + } + + /// Stores a fragment + CUTLASS_HOST_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { + iterator_.store_with_pointer_offset(frag, pointer_offset); + } + + /// Stores a fragment + CUTLASS_HOST_DEVICE + void store(Fragment const &frag, TensorCoord const & tile_offset) { + iterator_.store_with_pointer_offset(frag, {tile_offset.row(), tile_offset.column()}); + } + + /// Stores a fragment + CUTLASS_HOST_DEVICE + void store(Fragment const &frag) { + iterator_.store_with_pointer_offset(frag, 0); + } + + /// Advances the pointer + CUTLASS_HOST_DEVICE + RegularTileIterator2dThreadTile &operator++() { + ++iterator_; + return *this; + } + + /// Advances the pointer + CUTLASS_HOST_DEVICE + RegularTileIterator2dThreadTile &operator--() { + --iterator_; + return *this; + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + iterator_.add_tile_offset({coord.row(), coord.column()}); + } + +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace transform +} // namespace cutlass + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_tile_iterator_tensor_op.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_tile_iterator_tensor_op.h new file mode 100644 index 0000000000000000000000000000000000000000..8ea0efadcd74230605d7a51c5510195c41123b56 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_tile_iterator_tensor_op.h @@ -0,0 +1,1107 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Templates implementing storing of tiles from pitch-linear rank=2 tensors. +*/ + +#pragma once + +#include "cutlass/transform/threadblock/regular_tile_iterator.h" +#include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace transform { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +/// Tile iterator specialized for congruous arrangements for TensorOps +/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template +class RegularTileIterator< + Shape_, Element_, + layout::TensorOpMultiplicandCongruous::value, + int(128 / sizeof(Element_))>, + AdvanceRank, ThreadMap_, Alignment> { + public: + + static_assert(AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = + layout::TensorOpMultiplicandCongruous::value, + int(128 / sizeof(Element))>; + static int const kAdvanceRank = AdvanceRank; + static int const kAlignment = Alignment; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using ThreadMap = ThreadMap_; + + /// Internal details made public to facilitate introspection + struct Detail { + + /// This iterator is specialized for an access size that is 128 bits in length. 
+ static int const kAccessSizeInBits = 128; + + static_assert( + sizeof_bits::value * ThreadMap::kElementsPerAccess == kAccessSizeInBits, + "This iterator requires a policy whose access size is 128bs"); + }; + +private: + + /// Element type per access + using AccessType = Array; + +public: + + /// Fragment object to be loaded or stored + using Fragment = Array; + + /// Underlying iterator to compute the addresses + using TileAccessIterator = RegularTileAccessIterator; + +private: + + // + // Data members + // + + /// Data member to the tile access iterator + TileAccessIterator address_iterator_; + +public: + + /// Construct a TileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + RegularTileIterator(TensorRef ref, ///< Pointer to start of tensor + int thread_id ///< ID of each participating thread + ) + : address_iterator_(ref, thread_id) {} + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + address_iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileIterator &operator++() { + address_iterator_.add_tile_offset({0, 1}); + return *this; + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileIterator operator++(int) { + RegularTileIterator prev(*this); + this->operator++(); + + return prev; + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + address_iterator_.add_tile_offset(coord); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + load_with_byte_offset(frag, pointer_offset * sizeof_bits::value / 8); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_byte_offset(Fragment &frag, Index byte_offset) { + address_iterator_.set_iteration_index(0); + AccessType *frag_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { + int access_idx = c + s * ThreadMap::Iterations::kContiguous; + + char const *byte_ptr = reinterpret_cast(address_iterator_.get()) + byte_offset; + AccessType const *access_ptr = reinterpret_cast(byte_ptr); + + frag_ptr[access_idx] = *access_ptr; + ++address_iterator_; + } + } + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment &frag) { + load_with_pointer_offset(frag, 0); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { + store_with_byte_offset(frag, pointer_offset * sizeof_bits::value / 8); + } + + CUTLASS_DEVICE + void store_with_byte_offset(Fragment const &frag, Index byte_offset) { + address_iterator_.set_iteration_index(0); + AccessType const *frag_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { + int access_idx = c + s * ThreadMap::Iterations::kContiguous; + + char *byte_ptr = reinterpret_cast(address_iterator_.get()) + byte_offset; + AccessType *access_ptr = reinterpret_cast(byte_ptr); + + *access_ptr = frag_ptr[access_idx]; + ++address_iterator_; + } + } + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const &frag) { + store_with_byte_offset(frag, 0); + } +}; + 
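The specialization above moves one 128-bit AccessType vector per inner step, sweeping ThreadMap::Iterations::kStrided by ThreadMap::Iterations::kContiguous positions and delegating all address arithmetic to the underlying RegularTileAccessIterator. As a rough illustration of that access pattern only, the standalone sketch below uses hypothetical simplified types (Iterations, the delta/stride parameters) in place of the CUTLASS thread map; it is not part of the library.

#include <array>
#include <cstdint>

// Hypothetical stand-ins for ThreadMap::Iterations and a 128-bit access vector.
struct Iterations {
  static constexpr int kStrided = 4;
  static constexpr int kContiguous = 2;
};
constexpr int kElementsPerAccess = 8;  // 8 x 16-bit elements == 128 bits
using AccessType = std::array<std::uint16_t, kElementsPerAccess>;
using Fragment = std::array<AccessType, Iterations::kStrided * Iterations::kContiguous>;

// One vector copy per (s, c) position, in the same order as the s/c loops of
// load_with_byte_offset above; delta_* and stride_in_vectors stand in for the
// addressing that the real iterator performs through its address iterator.
inline void load_fragment(Fragment &frag, AccessType const *tile,
                          int delta_strided, int delta_contiguous,
                          int stride_in_vectors) {
  for (int s = 0; s < Iterations::kStrided; ++s) {
    AccessType const *row = tile + s * delta_strided * stride_in_vectors;
    for (int c = 0; c < Iterations::kContiguous; ++c) {
      frag[c + s * Iterations::kContiguous] = row[c * delta_contiguous];
    }
  }
}

The store path is symmetric: the same two loops write each fragment vector back through the access pointer instead of reading it.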
+//////////////////////////////////////////////////////////////////////////////// + +/// Tile Iterator specialized for column-major congruous TensorOp formats. +/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template +class RegularTileIterator< + Shape_, Element_, + layout::ColumnMajorTensorOpMultiplicandCongruous< + sizeof_bits::value, int(128 / sizeof(Element_))>, + AdvanceRank, ThreadMap_, Alignment> { + public: + + static_assert(AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for column-major iterator may along advance along the " + "columns(rank=0) or rows(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::ColumnMajorTensorOpMultiplicandCongruous< + sizeof_bits::value, int(128 / sizeof(Element))>; + static int const kAdvanceRank = AdvanceRank; + static int const kAlignment = Alignment; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using ThreadMap = ThreadMap_; + + /// Underlying iterator type + using UnderlyingIterator = RegularTileIterator< + layout::PitchLinearShape, Element, + layout::TensorOpMultiplicandCongruous::value, + int(128 / sizeof(Element))>, + (kAdvanceRank == 0 ? 0 : 1), ThreadMap_>; + + public: + + /// Fragment object to be loaded or stored + using Fragment = Array; + +private: + + /// Underlying iterator + UnderlyingIterator iterator_; + +public: + + /// Construct a TileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + RegularTileIterator( + TensorRef ref, ///< Pointer to start of tensor + int thread_id ///< ID of each participating thread + ): iterator_({ref.data(), ref.stride()}, thread_id) { + + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + iterator_.add_tile_offset({coord.row(), coord.column()}); + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileIterator operator++(int) { + RegularTileIterator prev(*this); + ++iterator_; + + return prev; + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment &frag) { + load_with_pointer_offset(frag, 0); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset( + Fragment const &frag, + Index pointer_offset) { + + iterator_.store_with_pointer_offset(frag, pointer_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const &frag) { + store_with_pointer_offset(frag, 0); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Tile Iterator specialized for row-major congruous TensorOp formats. 
+/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template +class RegularTileIterator< + Shape_, Element_, + layout::RowMajorTensorOpMultiplicandCongruous::value, + int(128 / sizeof(Element_))>, + AdvanceRank, ThreadMap_, Alignment> { + public: + + static_assert(AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for row-major iterator may along advance along the " + "columns(rank=0) or rows(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::RowMajorTensorOpMultiplicandCongruous< + sizeof_bits::value, int(128 / sizeof(Element))>; + static int const kAdvanceRank = AdvanceRank; + static int const kAlignment = Alignment; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using ThreadMap = ThreadMap_; + + /// Underlying iterator type + using UnderlyingIterator = RegularTileIterator< + layout::PitchLinearShape, Element, + layout::TensorOpMultiplicandCongruous::value, + int(128 / sizeof(Element))>, + (kAdvanceRank == 0 ? 1 : 0), ThreadMap_>; + + public: + + /// Fragment object to be loaded or stored + using Fragment = Array; + +private: + + /// Underlying iterator + UnderlyingIterator iterator_; + +public: + + /// Construct a TileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + RegularTileIterator( + TensorRef ref, ///< Pointer to start of tensor + int thread_id ///< ID of each participating thread + ): iterator_({ref.data(), ref.stride()}, thread_id) { + + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + iterator_.add_tile_offset({coord.column(), coord.row()}); + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileIterator &operator++() { + + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. 
+ CUTLASS_HOST_DEVICE + RegularTileIterator operator++(int) { + + RegularTileIterator prev(*this); + ++iterator_; + + return prev; + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment &frag) { + load_with_pointer_offset(frag, 0); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset( + Fragment const &frag, + Index pointer_offset) { + + iterator_.store_with_pointer_offset(frag, pointer_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const &frag) { + store_with_pointer_offset(frag, 0); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Tile iterator specialized for crosswise arrangements for TensorOps +/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template +class RegularTileIterator::value, Crosswise>, + AdvanceRank, ThreadMap_, Alignment> { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = + layout::TensorOpMultiplicandCrosswise::value, + Crosswise>; + + static int const kAdvanceRank = AdvanceRank; + static int const kAlignment = Alignment; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using ThreadMap = ThreadMap_; + + /// Internal details made public to facilitate introspection + struct Detail { + /// This iterator is specialized for an access size that is 128 bits in + /// length. + static int const kAccessSizeInBits = 128; + + static_assert(sizeof_bits::value * ThreadMap::kElementsPerAccess == + kAccessSizeInBits, + "This iterator requires a policy whose access size is 128bs"); + }; + + private: + /// Element type per access + using AccessType = Array; + + public: + /// Fragment object to be loaded or stored + using Fragment = + Array; + + /// Underlying iterator to compute the addresses + using TileAccessIterator = RegularTileAccessIterator; + + private: + // + // Data members + // + + /// Data member to the tile access iterator + TileAccessIterator address_iterator_; + + public: + /// Construct a TileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + RegularTileIterator(TensorRef ref, ///< Pointer to start of tensor + int thread_id ///< ID of each participating thread + ) + : address_iterator_(ref, thread_id) {} + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + address_iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileIterator &operator++() { + address_iterator_.add_tile_offset({1, 0}); + return *this; + } + + /// Advances to the next tile in memory. 
+ CUTLASS_HOST_DEVICE + RegularTileIterator operator++(int) { + RegularTileIterator prev(*this); + this->operator++(); + + return prev; + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + address_iterator_.add_tile_offset(coord); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + address_iterator_.set_iteration_index(0); + AccessType *frag_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { + int access_idx = c + s * ThreadMap::Iterations::kContiguous; + frag_ptr[access_idx] = *(address_iterator_.get() + pointer_offset); + ++address_iterator_; + } + } + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment &frag) { load_with_pointer_offset(frag, 0); } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { + store_with_byte_offset(frag, pointer_offset * sizeof_bits::value / 8); + } + + CUTLASS_DEVICE + void store_with_byte_offset(Fragment const &frag, Index byte_offset) { + address_iterator_.set_iteration_index(0); + AccessType const *frag_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { + int access_idx = c + s * ThreadMap::Iterations::kContiguous; + + char *byte_ptr = reinterpret_cast(address_iterator_.get()) + byte_offset; + AccessType *access_ptr = reinterpret_cast(byte_ptr); + + *access_ptr = frag_ptr[access_idx]; + ++address_iterator_; + } + } + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Tile Iterator specialized for column-major crosswise TensorOp formats. +/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template +class RegularTileIterator::value, Crosswise>, + AdvanceRank, ThreadMap_, Alignment> { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for column-major iterator may along advance along the " + "columns(rank=0) or rows(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::ColumnMajorTensorOpMultiplicandCrosswise< + sizeof_bits::value, Crosswise>; + static int const kAdvanceRank = AdvanceRank; + static int const kAlignment = Alignment; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using ThreadMap = ThreadMap_; + + /// Underlying iterator type + using UnderlyingIterator = RegularTileIterator< + layout::PitchLinearShape, Element, + layout::TensorOpMultiplicandCrosswise::value, + Crosswise>, + (kAdvanceRank == 0 ? 
0 : 1), ThreadMap_>; + + public: + /// Fragment object to be loaded or stored + using Fragment = Array; + + private: + /// Underlying iterator + UnderlyingIterator iterator_; + + public: + /// Construct a TileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + RegularTileIterator(TensorRef ref, ///< Pointer to start of tensor + int thread_id ///< ID of each participating thread + ) + : iterator_({ref.data(), ref.stride()}, thread_id) {} + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + iterator_.add_tile_offset({coord.row(), coord.column()}); + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileIterator operator++(int) { + RegularTileIterator prev(*this); + ++iterator_; + + return prev; + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment &frag) { load_with_pointer_offset(frag, 0); } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { + iterator_.store_with_pointer_offset(frag, pointer_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Tile Iterator specialized for row-major crosswise TensorOp formats. +/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template +class RegularTileIterator::value, Crosswise>, + AdvanceRank, ThreadMap_, Alignment> { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for row-major iterator may along advance along the " + "columns(rank=0) or rows(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::RowMajorTensorOpMultiplicandCrosswise< + sizeof_bits::value, Crosswise>; + static int const kAdvanceRank = AdvanceRank; + static int const kAlignment = Alignment; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using ThreadMap = ThreadMap_; + + /// Underlying iterator type + using UnderlyingIterator = RegularTileIterator< + layout::PitchLinearShape, Element, + layout::TensorOpMultiplicandCrosswise::value, + Crosswise>, + (kAdvanceRank == 0 ? 
1 : 0), ThreadMap_>; + + public: + /// Fragment object to be loaded or stored + using Fragment = Array; + + private: + /// Underlying iterator + UnderlyingIterator iterator_; + + public: + /// Construct a TileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + RegularTileIterator(TensorRef ref, ///< Pointer to start of tensor + int thread_id ///< ID of each participating thread + ) + : iterator_({ref.data(), ref.stride()}, thread_id) {} + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + iterator_.add_tile_offset({coord.column(), coord.row()}); + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileIterator operator++(int) { + RegularTileIterator prev(*this); + ++iterator_; + + return prev; + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment &frag) { load_with_pointer_offset(frag, 0); } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { + iterator_.store_with_pointer_offset(frag, pointer_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Tile iterator specialized for k interleaved arrangements for TensorOps +/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template +class RegularTileIterator< + Shape_, Element_, + layout::TensorOpMultiplicandRowMajorInterleaved::value, + InterleavedK>, + AdvanceRank, ThreadMap_, Alignment> { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = + layout::TensorOpMultiplicandRowMajorInterleaved::value, + InterleavedK>; + static int const kAdvanceRank = AdvanceRank; + static int const kAlignment = Alignment; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using ThreadMap = ThreadMap_; + + /// Internal details made public to facilitate introspection + struct Detail { + /// This iterator is specialized for an access size that is 128 bits in + /// length. 
+ static int const kAccessSizeInBits = 128; + + static_assert(sizeof_bits::value * ThreadMap::kElementsPerAccess == + kAccessSizeInBits, + "This iterator requires a policy whose access size is 128bs"); + }; + + private: + + /// Element type per access + using AccessType = Array; + + public: + /// Fragment object to be loaded or stored + using Fragment = + Array; + + /// Underlying iterator to compute the addresses + using TileAccessIterator = RegularTileAccessIterator; + + private: + // + // Data members + // + + /// Data member to the tile access iterator + TileAccessIterator address_iterator_; + + public: + /// Construct a TileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + RegularTileIterator(TensorRef ref, ///< Pointer to start of tensor + int thread_id ///< ID of each participating thread + ) + : address_iterator_(ref, thread_id) {} + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + address_iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileIterator &operator++() { + address_iterator_.add_pointer_offset(Shape::kCount); + return *this; + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileIterator operator++(int) { + RegularTileIterator prev(*this); + this->operator++(); + + return prev; + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + address_iterator_.add_pointer_offset(coord.contiguous() * Shape::kCount); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + address_iterator_.set_iteration_index(0); + AccessType *frag_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { + int access_idx = c + s * ThreadMap::Iterations::kContiguous; + frag_ptr[access_idx] = *(address_iterator_.get() + pointer_offset); + ++address_iterator_; + } + } + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment &frag) { load_with_pointer_offset(frag, 0); } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { + AccessType const *frag_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { + int access_idx = c + s * ThreadMap::Iterations::kContiguous; + *(address_iterator_.get() + pointer_offset) = frag_ptr[access_idx]; + ++address_iterator_; + } + } + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Tile iterator specialized for k interleaved arrangements for TensorOps +/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// + +template +class RegularTileIterator< + Shape_, Element_, + layout::TensorOpMultiplicandColumnMajorInterleaved::value, + InterleavedK>, + AdvanceRank, ThreadMap_, Alignment> { + + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along 
advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = + layout::TensorOpMultiplicandColumnMajorInterleaved::value, + InterleavedK>; + static int const kAdvanceRank = AdvanceRank; + static int const kAlignment = Alignment; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using ThreadMap = ThreadMap_; + + /// Underlying iterator type + using UnderlyingIterator = RegularTileIterator< + cutlass::MatrixShape, + Element, + layout::TensorOpMultiplicandRowMajorInterleaved::value, InterleavedK>, + (kAdvanceRank == 1 ? 0 : 1), + ThreadMap + >; + + public: + /// Fragment object to be loaded or stored + using Fragment = Array; + + private: + + /// Underlying iterator + UnderlyingIterator iterator_; + + public: + /// Construct a TileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + RegularTileIterator(TensorRef ref, ///< Pointer to start of tensor + int thread_id ///< ID of each participating thread + ) + : iterator_({ref.data(), ref.stride()}, thread_id) {} + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileIterator operator++(int) { + RegularTileIterator prev(*this); + ++iterator_; + + return prev; + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + iterator_.add_tile_offset({coord.strided(), coord.contiguous()}); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment &frag) { load_with_pointer_offset(frag, 0); } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { + iterator_.store_with_pointer_offset(frag, pointer_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace transform +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_tile_iterator_tensor_op_sm70.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_tile_iterator_tensor_op_sm70.h new file mode 100644 index 0000000000000000000000000000000000000000..3d6fff9e491bc70bf7942b64b85e516abaf3025c --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_tile_iterator_tensor_op_sm70.h @@ -0,0 +1,1460 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Templates implementing loading of tiles from pitch-linear rank=2 tensors. + + This iterator uses masks to guard out-of-bounds accesses and visits the last "residue" tile + first, with the objective of minimizing predicate mask updates during steady-state operation. + + A precomputed "Params" object minimizes the amount of state that must be stored in registers, + and integer addition is used to advance the pointer through memory. 
+*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/array.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/tensor_ref.h" +#include "cutlass/layout/pitch_linear.h" +#include "cutlass/layout/tensor_op_multiplicand_sm70.h" + +#include "cutlass/transform/threadblock/regular_tile_iterator.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace transform { +namespace threadblock { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Tile iterator specialized for congruous arrangements for TensorOps +/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template < + typename Shape_, + typename Element_, + int AdvanceRank, + typename ThreadMap_, + int Alignment +> +class RegularTileIterator< + Shape_, + Element_, + layout::VoltaTensorOpMultiplicandCongruous::value>, + AdvanceRank, + ThreadMap_, + Alignment> { +public: + + static_assert(AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::VoltaTensorOpMultiplicandCongruous::value>; + static int const kAdvanceRank = AdvanceRank; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + using StrideIndex = typename Layout::Stride::Index; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using ThreadMap = ThreadMap_; + + /// Internal details made public to facilitate introspection + struct Detail { + + /// This iterator is specialized for an access size that is 128 bits in length. + static int const kAccessSizeInBits = 128; + + static_assert( + sizeof_bits::value * ThreadMap::kElementsPerAccess == kAccessSizeInBits, + "This iterator requires a policy whose access size is 128bs"); + + ///< Number of pointers + static int const kPointerCount = (ThreadMap::Iterations::kStrided > 1 ? 
2 : 1); + }; + + +private: + + /// Element type per access + using AccessType = Array; + +public: + + /// Fragment object to be loaded or stored + using Fragment = Array; + +private: + + // + // Data members + // + + /// Stride value + StrideIndex stride_; + + /// Internal pointer to first access of tile + AccessType * pointer_[Detail::kPointerCount]; + + /// Internal byte offset + Index byte_offset_; + +public: + + /// Construct a TileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + RegularTileIterator( + TensorRef ref, ///< Pointer to start of tensor + int thread_id ///< ID of each participating thread + ): stride_(ref.stride(0) / Layout::kElementsPerAccess), byte_offset_(0) { + + layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id); + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < Detail::kPointerCount; ++i) { + + // This is the offset of a thread within a threadblock tile for a specific pointer + // (units of elements) + layout::PitchLinearCoord thread_offset_in_threadblock_tile = + thread_offset_base + layout::PitchLinearCoord{0, ThreadMap::Detail::WarpThreadArrangement::kStrided * i}; + + // initialize pointer + pointer_[i] = reinterpret_cast(ref.data() + ref.offset(thread_offset_in_threadblock_tile)); + } + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + + byte_offset_ += pointer_offset * sizeof(Element); + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileIterator &operator++() { + + add_pointer_offset((kAdvanceRank ? Shape::kStrided * stride_ * Layout::kElementsPerAccess : Shape::kContiguous)); + + return *this; + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileIterator operator++(int) { + + RegularTileIterator prev(*this); + this->operator++(); + + return prev; + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + add_pointer_offset( + coord.contiguous() * Shape::kContiguous / ThreadMap::kElementsPerAccess + + coord.strided() * Shape::kStrided * stride_ * Layout::kElementsPerAccess + ); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + + AccessType *frag_ptr = reinterpret_cast(&frag); + + Index vec_pointer_offset = pointer_offset / ThreadMap::kElementsPerAccess; + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { + + AccessType *access_ptr = pointer_[s & 1]; + int stride_idx = (s & ~1); + + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { + + int access_offset = stride_idx * ThreadMap::Delta::kStrided * stride_ + + c * ThreadMap::Delta::kContiguous / ThreadMap::kElementsPerAccess + + vec_pointer_offset; + + int access_idx = c + s * ThreadMap::Iterations::kContiguous; + + char const *access_byte_ptr = reinterpret_cast(access_ptr + access_offset); + + frag_ptr[access_idx] = *reinterpret_cast(access_byte_ptr + byte_offset_); + } + } + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment &frag) { + load_with_pointer_offset(frag, 0); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset( + Fragment const &frag, + Index pointer_offset) { + + AccessType const *frag_ptr = reinterpret_cast(&frag); + + Index vec_pointer_offset = pointer_offset / ThreadMap::kElementsPerAccess; + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < 
ThreadMap::Iterations::kStrided; ++s) { + + AccessType *access_ptr = pointer_[s & 1]; + int stride_idx = (s & ~1); + + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { + + int access_offset = stride_idx * ThreadMap::Delta::kStrided * stride_ + + c * ThreadMap::Delta::kContiguous / ThreadMap::kElementsPerAccess + + vec_pointer_offset; + + int access_idx = c + s * ThreadMap::Iterations::kContiguous; + + char *access_byte_ptr = reinterpret_cast(access_ptr + access_offset); + + *reinterpret_cast(access_byte_ptr + byte_offset_) = frag_ptr[access_idx]; + } + } + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const &frag) { + store_with_pointer_offset(frag, 0); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +// Tile Iterator specialized for column-major congruous TensorOp formats. +/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template < + typename Shape_, + typename Element_, + int AdvanceRank, + typename ThreadMap_, + int Alignment +> +class RegularTileIterator< + Shape_, + Element_, + layout::ColumnMajorVoltaTensorOpMultiplicandCongruous::value>, + AdvanceRank, + ThreadMap_, + Alignment> { +public: + + static_assert(AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for column-major iterator may along advance along the " + "columns(rank=0) or rows(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::ColumnMajorVoltaTensorOpMultiplicandCongruous::value>; + static int const kAdvanceRank = AdvanceRank; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using ThreadMap = ThreadMap_; + + /// Underlying iterator type + using UnderlyingIterator = RegularTileIterator< + layout::PitchLinearShape, + Element, + layout::VoltaTensorOpMultiplicandCongruous::value>, + (kAdvanceRank == 0 ? 0 : 1), + ThreadMap_>; + +public: + + /// Fragment object to be loaded or stored + using Fragment = Array; + +private: + + /// Underlying iterator + UnderlyingIterator iterator_; + +public: + + /// Construct a TileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + RegularTileIterator( + TensorRef ref, ///< Pointer to start of tensor + int thread_id ///< ID of each participating thread + ): iterator_({ref.data(), ref.stride()}, thread_id) { + + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + iterator_.add_tile_offset({coord.row(), coord.column()}); + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileIterator &operator++() { + + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. 
+ CUTLASS_HOST_DEVICE + RegularTileIterator operator++(int) { + + RegularTileIterator prev(*this); + ++iterator_; + + return prev; + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment &frag) { + load_with_pointer_offset(frag, 0); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset( + Fragment const &frag, + Index pointer_offset) { + + iterator_.store_with_pointer_offset(frag, pointer_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const &frag) { + store_with_pointer_offset(frag, 0); + } +}; + + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Tile Iterator specialized for row-major congruous TensorOp formats. +/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template < + typename Shape_, + typename Element_, + int AdvanceRank, + typename ThreadMap_, + int Alignment +> +class RegularTileIterator< + Shape_, + Element_, + layout::RowMajorVoltaTensorOpMultiplicandCongruous::value>, + AdvanceRank, + ThreadMap_, + Alignment> { +public: + + static_assert(AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for row-major iterator may along advance along the " + "columns(rank=0) or rows(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::RowMajorVoltaTensorOpMultiplicandCongruous::value>; + static int const kAdvanceRank = AdvanceRank; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using ThreadMap = ThreadMap_; + + /// Underlying iterator type + using UnderlyingIterator = RegularTileIterator< + layout::PitchLinearShape, + Element, + layout::VoltaTensorOpMultiplicandCongruous::value>, + (kAdvanceRank == 0 ? 1 : 0), + ThreadMap_>; + +public: + + /// Fragment object to be loaded or stored + using Fragment = Array; + +private: + + /// Underlying iterator + UnderlyingIterator iterator_; + +public: + + /// Construct a TileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + RegularTileIterator( + TensorRef ref, ///< Pointer to start of tensor + int thread_id ///< ID of each participating thread + ): iterator_({ref.data(), ref.stride()}, thread_id) { + + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + iterator_.add_tile_offset({coord.column(), coord.row()}); + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileIterator &operator++() { + + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. 
+ CUTLASS_HOST_DEVICE + RegularTileIterator operator++(int) { + + RegularTileIterator prev(*this); + ++iterator_; + + return prev; + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment &frag) { + load_with_pointer_offset(frag, 0); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset( + Fragment const &frag, + Index pointer_offset) { + + iterator_.store_with_pointer_offset(frag, pointer_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const &frag) { + store_with_pointer_offset(frag, 0); + } +}; +/// Tile iterator specialized for congruous arrangements for TensorOps +/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template < + typename Shape_, + typename Element_, + int AdvanceRank, + typename ThreadMap_, + int Alignment +> +class RegularTileIterator< + Shape_, + Element_, + layout::VoltaTensorOpMultiplicandBCongruous::value>, + AdvanceRank, + ThreadMap_, + Alignment> { +public: + + static_assert(AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::VoltaTensorOpMultiplicandBCongruous::value>; + static int const kAdvanceRank = AdvanceRank; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + using StrideIndex = typename Layout::Stride::Index; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using ThreadMap = ThreadMap_; + + /// Internal details made public to facilitate introspection + struct Detail { + + /// This iterator is specialized for an access size that is 128 bits in length. + static int const kAccessSizeInBits = 128; + + static_assert( + sizeof_bits::value * ThreadMap::kElementsPerAccess == kAccessSizeInBits, + "This iterator requires a policy whose access size is 128bs"); + + ///< Number of pointers + static int const kPointerCount = (ThreadMap::Iterations::kStrided > 1 ? 
2 : 1); + }; + + +private: + + /// Element type per access + using AccessType = Array; + +public: + + /// Fragment object to be loaded or stored + using Fragment = Array; + +private: + + // + // Data members + // + + /// Stride value + StrideIndex stride_; + + /// Internal pointer to first access of tile + AccessType * pointer_[Detail::kPointerCount]; + + /// Internal byte offset + Index byte_offset_; + +public: + + /// Construct a TileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + RegularTileIterator( + TensorRef ref, ///< Pointer to start of tensor + int thread_id ///< ID of each participating thread + ): stride_(ref.stride(0) / Layout::kElementsPerAccess), byte_offset_(0) { + + layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id); + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < Detail::kPointerCount; ++i) { + + // This is the offset of a thread within a threadblock tile for a specific pointer + // (units of elements) + layout::PitchLinearCoord thread_offset_in_threadblock_tile = + thread_offset_base + layout::PitchLinearCoord{0, ThreadMap::Detail::WarpThreadArrangement::kStrided * i}; + + // initialize pointer + pointer_[i] = reinterpret_cast(ref.data() + ref.offset(thread_offset_in_threadblock_tile)); + } + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + + byte_offset_ += pointer_offset * sizeof(Element); + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileIterator &operator++() { + + add_pointer_offset((kAdvanceRank ? Shape::kStrided * stride_ * Layout::kElementsPerAccess : Shape::kContiguous)); + + return *this; + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileIterator operator++(int) { + + RegularTileIterator prev(*this); + this->operator++(); + + return prev; + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + add_pointer_offset( + coord.contiguous() * Shape::kContiguous / ThreadMap::kElementsPerAccess + + coord.strided() * Shape::kStrided * stride_ * Layout::kElementsPerAccess + ); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + + AccessType *frag_ptr = reinterpret_cast(&frag); + + Index vec_pointer_offset = pointer_offset / ThreadMap::kElementsPerAccess; + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { + + AccessType *access_ptr = pointer_[s & 1]; + int stride_idx = (s & ~1); + + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { + + int access_offset = stride_idx * ThreadMap::Delta::kStrided * stride_ + + c * ThreadMap::Delta::kContiguous / ThreadMap::kElementsPerAccess + + vec_pointer_offset; + + int access_idx = c + s * ThreadMap::Iterations::kContiguous; + + char const *access_byte_ptr = reinterpret_cast(access_ptr + access_offset); + + frag_ptr[access_idx] = *reinterpret_cast(access_byte_ptr + byte_offset_); + } + } + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment &frag) { + load_with_pointer_offset(frag, 0); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset( + Fragment const &frag, + Index pointer_offset) { + + AccessType const *frag_ptr = reinterpret_cast(&frag); + + Index vec_pointer_offset = pointer_offset / ThreadMap::kElementsPerAccess; + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < 
ThreadMap::Iterations::kStrided; ++s) { + + AccessType *access_ptr = pointer_[s & 1]; + int stride_idx = (s & ~1); + + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { + + int access_offset = stride_idx * ThreadMap::Delta::kStrided * stride_ + + c * ThreadMap::Delta::kContiguous / ThreadMap::kElementsPerAccess + + vec_pointer_offset; + + int access_idx = c + s * ThreadMap::Iterations::kContiguous; + + char *access_byte_ptr = reinterpret_cast(access_ptr + access_offset); + + *reinterpret_cast(access_byte_ptr + byte_offset_) = frag_ptr[access_idx]; + } + } + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const &frag) { + store_with_pointer_offset(frag, 0); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Tile Iterator specialized for column-major congruous TensorOp formats. +/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template < + typename Shape_, + typename Element_, + int AdvanceRank, + typename ThreadMap_, + int Alignment +> +class RegularTileIterator< + Shape_, + Element_, + layout::ColumnMajorVoltaTensorOpMultiplicandBCongruous::value>, + AdvanceRank, + ThreadMap_, + Alignment> { +public: + + static_assert(AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for column-major iterator may along advance along the " + "columns(rank=0) or rows(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::ColumnMajorVoltaTensorOpMultiplicandBCongruous::value>; + static int const kAdvanceRank = AdvanceRank; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using ThreadMap = ThreadMap_; + + /// Underlying iterator type + using UnderlyingIterator = RegularTileIterator< + layout::PitchLinearShape, + Element, + layout::VoltaTensorOpMultiplicandBCongruous::value>, + (kAdvanceRank == 0 ? 0 : 1), + ThreadMap_>; + +public: + + /// Fragment object to be loaded or stored + using Fragment = Array; + +private: + + /// Underlying iterator + UnderlyingIterator iterator_; + +public: + + /// Construct a TileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + RegularTileIterator( + TensorRef ref, ///< Pointer to start of tensor + int thread_id ///< ID of each participating thread + ): iterator_({ref.data(), ref.stride()}, thread_id) { + + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + iterator_.add_tile_offset({coord.row(), coord.column()}); + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileIterator &operator++() { + + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. 
+ CUTLASS_HOST_DEVICE + RegularTileIterator operator++(int) { + + RegularTileIterator prev(*this); + ++iterator_; + + return prev; + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment &frag) { + load_with_pointer_offset(frag, 0); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset( + Fragment const &frag, + Index pointer_offset) { + + iterator_.store_with_pointer_offset(frag, pointer_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const &frag) { + store_with_pointer_offset(frag, 0); + } +}; + + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Tile Iterator specialized for row-major congruous TensorOp formats. +/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template < + typename Shape_, + typename Element_, + int AdvanceRank, + typename ThreadMap_, + int Alignment +> +class RegularTileIterator< + Shape_, + Element_, + layout::RowMajorVoltaTensorOpMultiplicandBCongruous::value>, + AdvanceRank, + ThreadMap_, + Alignment> { +public: + + static_assert(AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for row-major iterator may along advance along the " + "columns(rank=0) or rows(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::RowMajorVoltaTensorOpMultiplicandBCongruous::value>; + static int const kAdvanceRank = AdvanceRank; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using ThreadMap = ThreadMap_; + + /// Underlying iterator type + using UnderlyingIterator = RegularTileIterator< + layout::PitchLinearShape, + Element, + layout::VoltaTensorOpMultiplicandBCongruous::value>, + (kAdvanceRank == 0 ? 1 : 0), + ThreadMap_>; + +public: + + /// Fragment object to be loaded or stored + using Fragment = Array; + +private: + + /// Underlying iterator + UnderlyingIterator iterator_; + +public: + + /// Construct a TileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + RegularTileIterator( + TensorRef ref, ///< Pointer to start of tensor + int thread_id ///< ID of each participating thread + ): iterator_({ref.data(), ref.stride()}, thread_id) { + + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + iterator_.add_tile_offset({coord.column(), coord.row()}); + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileIterator &operator++() { + + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. 
+ CUTLASS_HOST_DEVICE + RegularTileIterator operator++(int) { + + RegularTileIterator prev(*this); + ++iterator_; + + return prev; + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment &frag) { + load_with_pointer_offset(frag, 0); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset( + Fragment const &frag, + Index pointer_offset) { + + iterator_.store_with_pointer_offset(frag, pointer_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const &frag) { + store_with_pointer_offset(frag, 0); + } +}; + + +/// Tile iterator specialized for crosswise arrangements for TensorOps. +/// +/// Volta TN SMEM layout is a little diffrent: +/// Crosseised elements will be stored in a line, while contiguous elements +/// sre stored in line-by-line. +/// Padding is used to reduce SMEM bank conflicts. +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template < + typename Shape_, + typename Element_, + int AdvanceRank, + typename ThreadMap_, + int Alignment +> +class RegularTileIterator< + Shape_, Element_, + layout::VoltaTensorOpMultiplicandCrosswise::value, + Shape_::kContiguous>, + AdvanceRank, ThreadMap_, Alignment> { + + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = + layout::VoltaTensorOpMultiplicandCrosswise::value, + Shape::kContiguous>; + static int const kAdvanceRank = AdvanceRank; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using ThreadMap = ThreadMap_; + + /// Internal details made public to facilitate introspection + struct Detail { + + ///< Number of pointers + static int const kPointerCount = (ThreadMap::Iterations::kStrided > 1 ? 2 : 1); + + /// Iterations for the kElementsPerAccess of ThreadMap + static int const kIterarionsPerAccess = + ThreadMap::kElementsPerAccess / Layout::kElementsPerAccess; + + /// Contiguous elements per line + static int const kContiguousElementsPerLine = 4; + }; + + private: + /// Element type per access + using AccessType = Array; + + public: + /// Fragment object to be loaded or stored + using Fragment = + Array; + + private: + // + // Data members + // + + /// The crosswised elements will be stored in a line. + /// line_size is size of crosswised dimension plus padding. 
+ /// in units of AccessType + Index line_size; + + /// Internal pointer to first access of tile + AccessType *pointer_[Detail::kPointerCount]; + + /// Internal byte offset + Index byte_offset_; + + + public: + /// Construct a TileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + RegularTileIterator(TensorRef ref, ///< Pointer to start of tensor + int thread_id ///< ID of each participating thread + ) + : line_size(ref.stride(0) * Detail::kContiguousElementsPerLine / Layout::kElementsPerAccess), + byte_offset_(0) { + + layout::PitchLinearCoord thread_offset_base = + ThreadMap::initial_offset(thread_id); + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < Detail::kPointerCount; ++i) { + // This is the offset of a thread within a threadblock tile for a specific + // pointer (units of elements) + layout::PitchLinearCoord thread_offset_in_threadblock_tile = + thread_offset_base + + layout::PitchLinearCoord{ + 0, ThreadMap::Detail::WarpThreadArrangement::kStrided * i}; + + // initialize pointer + pointer_[i] = reinterpret_cast( + ref.data() + ref.offset(thread_offset_in_threadblock_tile)); + } + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + byte_offset_ += pointer_offset * sizeof(Element); + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileIterator &operator++() { + // (Shape::kContiguous/Layout::kElementsPerAccess)* + // line_size * Layout::kElementsPerAccess + add_pointer_offset(Shape::kContiguous * line_size); + return *this; + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileIterator operator++(int) { + RegularTileIterator prev(*this); + this->operator++(); + + return prev; + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + add_pointer_offset((coord.contiguous() * (Shape::kContiguous / Layout::kElementsPerAccess) * + line_size + coord.strided() * Shape::kStrided) * + Layout::kElementsPerAccess); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + AccessType *frag_ptr = reinterpret_cast(&frag); + + Index vec_pointer_offset = pointer_offset / Layout::kElementsPerAccess; + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { + AccessType *access_ptr = pointer_[(s & 1) ^ (s / 2)]; + + access_ptr += 16 * (s / 2); + + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { + + CUTLASS_PRAGMA_UNROLL + for(int i = 0; i < Detail::kIterarionsPerAccess; ++i) { + + int access_offset = + c * ThreadMap::Delta::kContiguous / Detail::kContiguousElementsPerLine * line_size + + vec_pointer_offset + i * line_size; + + int access_idx = (c + s * ThreadMap::Iterations::kContiguous) * + Detail::kIterarionsPerAccess + i; + + char const *access_byte_ptr = reinterpret_cast(access_ptr + access_offset); + + frag_ptr[access_idx] = *reinterpret_cast( + access_byte_ptr + byte_offset_); + } + } + } + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment &frag) { load_with_pointer_offset(frag, 0); } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { + AccessType const *frag_ptr = reinterpret_cast(&frag); + + Index vec_pointer_offset = pointer_offset / Layout::kElementsPerAccess; + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { + + 
AccessType *access_ptr = pointer_[(s & 1) ^ ((s >> 1) & 1)]; + + access_ptr += 16 * (s / 2) + vec_pointer_offset; + + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { + CUTLASS_PRAGMA_UNROLL + for(int i = 0; i < Detail::kIterarionsPerAccess; ++i) { + + int access_offset = + c * ThreadMap::Delta::kContiguous / Detail::kContiguousElementsPerLine * line_size + i * line_size; + + int access_idx = (c + s * ThreadMap::Iterations::kContiguous) * + Detail::kIterarionsPerAccess + i; + + char *access_byte_ptr = reinterpret_cast(access_ptr + access_offset); + + *reinterpret_cast(access_byte_ptr + byte_offset_) = + frag_ptr[access_idx]; + } + } + } + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Tile Iterator specialized for column-major crosswise TensorOp formats. +/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template < + typename Shape_, + typename Element_, + int AdvanceRank, + typename ThreadMap_, + int Alignment +> +class RegularTileIterator::value, Shape_::kRow>, + AdvanceRank, ThreadMap_, Alignment> { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for column-major iterator may along advance along the " + "columns(rank=0) or rows(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::ColumnMajorVoltaTensorOpMultiplicandCrosswise< + sizeof_bits::value, Shape::kRow>; + static int const kAdvanceRank = AdvanceRank; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using ThreadMap = ThreadMap_; + + /// Underlying iterator type + using UnderlyingIterator = RegularTileIterator< + layout::PitchLinearShape, Element, + layout::VoltaTensorOpMultiplicandCrosswise::value, + Shape::kRow>, + (kAdvanceRank == 0 ? 0 : 1), ThreadMap_>; + + public: + /// Fragment object to be loaded or stored + using Fragment = Array; + + private: + /// Underlying iterator + UnderlyingIterator iterator_; + + public: + /// Construct a TileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + RegularTileIterator(TensorRef ref, ///< Pointer to start of tensor + int thread_id ///< ID of each participating thread + ) + : iterator_({ref.data(), ref.stride()}, thread_id) {} + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + iterator_.add_tile_offset({coord.row(), coord.column()}); + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. 
+ CUTLASS_HOST_DEVICE + RegularTileIterator operator++(int) { + RegularTileIterator prev(*this); + ++iterator_; + + return prev; + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment &frag) { load_with_pointer_offset(frag, 0); } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { + iterator_.store_with_pointer_offset(frag, pointer_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Tile Iterator specialized for row-major crosswise TensorOp formats. +/// +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept +/// +template < + typename Shape_, + typename Element_, + int AdvanceRank, + typename ThreadMap_, + int Alignment +> +class RegularTileIterator::value, Shape_::kColumn>, + AdvanceRank, ThreadMap_, Alignment> { + public: + static_assert( + AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for row-major iterator may along advance along the " + "columns(rank=0) or rows(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::RowMajorVoltaTensorOpMultiplicandCrosswise< + sizeof_bits::value, Shape::kColumn>; + static int const kAdvanceRank = AdvanceRank; + static int const kAlignment = Alignment; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorCoord = typename Layout::TensorCoord; + + using ThreadMap = ThreadMap_; + + /// Underlying iterator type + using UnderlyingIterator = RegularTileIterator< + layout::PitchLinearShape, Element, + layout::VoltaTensorOpMultiplicandCrosswise::value, + Shape::kColumn>, + (kAdvanceRank == 0 ? 1 : 0), ThreadMap_>; + + public: + /// Fragment object to be loaded or stored + using Fragment = Array; + + private: + /// Underlying iterator + UnderlyingIterator iterator_; + + public: + /// Construct a TileIterator with zero threadblock offset + CUTLASS_HOST_DEVICE + RegularTileIterator(TensorRef ref, ///< Pointer to start of tensor + int thread_id ///< ID of each participating thread + ) + : iterator_({ref.data(), ref.stride()}, thread_id) {} + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Adds a tile offset + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const &coord) { + iterator_.add_tile_offset({coord.column(), coord.row()}); + } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + RegularTileIterator &operator++() { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. 
+ CUTLASS_HOST_DEVICE + RegularTileIterator operator++(int) { + RegularTileIterator prev(*this); + ++iterator_; + + return prev; + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment &frag) { load_with_pointer_offset(frag, 0); } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { + iterator_.store_with_pointer_offset(frag, pointer_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } +}; + + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace transform +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/vector_iterator.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/vector_iterator.h new file mode 100644 index 0000000000000000000000000000000000000000..8536a326501463b84be5a0d903988b83b8d738ab --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/threadblock/vector_iterator.h @@ -0,0 +1,149 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Template wraps the vector access iterator concept to load whole vector from tensors in + memory. This is typically used for per-channel scale and bias in convolution kernels. 
+*/ + +#pragma once + +#include "cutlass/transform/threadblock/predicated_vector_access_iterator.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace transform { +namespace threadblock { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +class VectorIterator { +public: + using VectorAccessIterator = VectorAccessIterator_; + + using Shape = typename VectorAccessIterator::Shape; + using Element = typename VectorAccessIterator::Element; + using Layout = typename VectorAccessIterator::Layout; + using TensorCoord = typename Layout::TensorCoord; + using AccessType = typename VectorAccessIterator::AccessType; + using TensorRef = typename VectorAccessIterator::TensorRef; + using Index = typename VectorAccessIterator::Index; + using LongIndex = typename VectorAccessIterator::LongIndex; + + static int const kElementsPerAccess = VectorAccessIterator::kElementsPerAccess; + static int const kRowsPerIteration = VectorAccessIterator::kRowsPerIteration; + static int const kThreads = VectorAccessIterator::kThreads; + static int const kIterations = VectorAccessIterator::kIterations; + + /// Fragment object to be loaded or stored + using Fragment = cutlass::Array< + Element, kElementsPerAccess * kIterations>; + +private: + + /// Internal state + VectorAccessIterator vector_access_iterator_; + +public: + + /// Constructor + CUTLASS_HOST_DEVICE + VectorIterator( + Element const *ptr, + TensorCoord extent, + int thread_idx, + int warp_idx, + MatrixCoord const &threadblock_offset = MatrixCoord() + ): + vector_access_iterator_(ptr, extent, thread_idx, warp_idx, threadblock_offset) { } + + /// Advances to the next tile in memory. + CUTLASS_HOST_DEVICE + VectorIterator &operator++() { + vector_access_iterator_.advance(); + return *this; + } + + /// Advances to the next tile in memory. 
+ CUTLASS_HOST_DEVICE + VectorIterator operator++(int) { + VectorIterator self(*this); + operator++(); + return self; + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { + + frag.clear(); + AccessType *frag_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < kIterations; ++c) { + + cutlass::arch::global_load< + AccessType, + sizeof(AccessType) + >( + frag_ptr[c], + vector_access_iterator_.get() + pointer_offset, + vector_access_iterator_.valid() + ); + + ++vector_access_iterator_; + } +// } + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment &frag) { + vector_access_iterator_.set_iteration_index(0); + load_with_pointer_offset(frag, 0); + } + + CUTLASS_DEVICE + void advance() { + vector_access_iterator_.advance(); + } + +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace transform +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/warp/vector_fragment_iterator.h b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/warp/vector_fragment_iterator.h new file mode 100644 index 0000000000000000000000000000000000000000..5b5babacf51c68a700943342b94929ac83843a9e --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/include/cutlass/transform/warp/vector_fragment_iterator.h @@ -0,0 +1,283 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + + +/*! 
\file + \brief This defines a "fragment" iterator for visiting the fragments of a warp vector + that participate in one warp-level mma operation. + + Typically, this is used to access the scale/bias fragement of a warp-level mma operation. + The scale/bias vector is then partitioned into smaller fragments that can be fed into + next warp-level mma operation. + + This iterator is necessary to accomplish warp-level mma fusion where the scale/bias vector is + applied to the multiplicand for the next mma. + +*/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/array.h" +#include "cutlass/matrix_shape.h" +#include "cutlass/layout/matrix.h" +#include "cutlass/layout/tensor.h" +#include "cutlass/numeric_conversion.h" + +namespace cutlass { +namespace transform { +namespace warp { + + +//////////////////////////////////////////////////////////////////////////////// + +template < + /// Size of the input fragment tile shape (concept: MatrixShape) + typename Shape_, + /// Element type + typename Element_, + /// Layout of operand in memory + typename Layout_, + /// Shape of one matrix product operation (concept: MatrixShape) + typename InstructionShape_, + //// Number of elements per access when loading fragment + int ElementsPerAccess> +class VectorFragmentIterator; + + +// Partial specialization for PitchLinear layout tile + +template < + /// Size of the input fragment vector shape (concept: MatrixShape) + typename Shape_, + /// Element type + typename Element_, + /// Shape of one matrix product operation (concept: MatrixShape) + typename InstructionShape_, + //// Number of elements per access when loading fragment + int ElementsPerAccess> +class VectorFragmentIterator { + public: + + /// Size of the input threadblock tile shape (concept: MatrixShape) + using Shape = Shape_; + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = cutlass::layout::PitchLinear; + + /// Shape of one matrix product operation (concept: MatrixShape) + using InstructionShape = InstructionShape_; + + /// Number of participating threads + static int const kThreads = 32; + + static int const kElementsPerAccess = ElementsPerAccess; + static int const kRowsPerIteration = 8; + static int const kColumnsPerAccess = 8; + static int const kElementsPerIteration = kRowsPerIteration * InstructionShape::kK / kThreads; + static int const kAccessPerIteration = kElementsPerIteration / kElementsPerAccess; + + /// Number of iterations + using Iterations = MatrixShape; + +public: + + // + // Derived quantities + // + // All fragments have kElementsPerAccess scale followed by bias + + /// Fragment object holding a thread's part of a tile + /// This is the fragment size produced by one iteration of the iterator. 
+ using Fragment = Array; + + /// Input threadblock fragment tile + using ThreadblockFragment = Array; + +private: + + /// Internal access type + using AccessType = Array; + +private: + // + // Data members + // + + /// Input threadblock fragment tile + AccessType const *iterator_; + + /// Internal index + int index_; + +public: + /// Constructs an iterator + CUTLASS_HOST_DEVICE + VectorFragmentIterator(ThreadblockFragment const &threadblock_frag) + : iterator_(reinterpret_cast(&threadblock_frag)), + index_(0) {} + + /// Add offset + CUTLASS_HOST_DEVICE + void add_offset(int index_offset) { + index_ += index_offset; + + if(index_ >= Iterations::kColumn) + index_ = 0; + } + + /// Increments + CUTLASS_HOST_DEVICE + VectorFragmentIterator &operator++() { + add_offset(1); + return *this; + } + + CUTLASS_HOST_DEVICE + void set_index(int idx) { + index_ = idx; + } + + /// Loads a fragment from the referenced part of the accumulator tile + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + + AccessType *frag_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int r = 0; r < Iterations::kRow; r++) { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kAccessPerIteration; i++) { + + frag_ptr[i * Iterations::kRow + r].clear(); + frag_ptr[i * Iterations::kRow + r] = iterator_[index_ * kAccessPerIteration + i]; + } + } + } + +}; + +// Partial specialization for Row-Major layout tile + +template < + /// Size of the input fragment tile shape (concept: MatrixShape) + typename Shape_, + /// Element type + typename Element_, + /// Shape of one matrix product operation (concept: MatrixShape) + typename InstructionShape_, + //// Number of elements per access when loading fragment + int ElementsPerAccess> +class VectorFragmentIterator { + public: + + /// Size of the input threadblock tile shape (concept: MatrixShape) + using Shape = Shape_; + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = cutlass::layout::RowMajor; + + /// Shape of one matrix product operation (concept: MatrixShape) + using InstructionShape = InstructionShape_; + + /// Underlying iterator + using Base = VectorFragmentIterator< + layout::PitchLinearShape, Element, + layout::PitchLinear, InstructionShape, ElementsPerAccess>; + + + public: + + // + // Derived quantities + // + /// Fragment object holding a thread's part of a tile + /// This is the fragment size produced by one iteration of the iterator. 
+ using Fragment = typename Base::Fragment; + + /// Input threadblock fragment tile + using ThreadblockFragment = typename Base::ThreadblockFragment; + + private: + /// Underlying iterator + Base iterator_; + +public: + /// Constructs an iterator + CUTLASS_HOST_DEVICE + VectorFragmentIterator(ThreadblockFragment const &threadblock_frag) + : iterator_(threadblock_frag) {} + + /// Add offset + CUTLASS_HOST_DEVICE + void add_offset(int index_offset) { + iterator_.add_offset(index_offset); + } + + /// Increments + CUTLASS_HOST_DEVICE + VectorFragmentIterator &operator++() { + add_offset(1); + return *this; + } + + CUTLASS_HOST_DEVICE + void set_index(int idx) { + iterator_.set_index(idx); + } + + /// Loads a fragment from the referenced part of the accumulator tile + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + iterator_.load(frag); + } + +}; + + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace warp +} // namespace conv +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/build/building_in_windows_with_visual_studio.md b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/build/building_in_windows_with_visual_studio.md new file mode 100644 index 0000000000000000000000000000000000000000..51bdf7e5f7a9e5d7ddd57850b20f2088dc721eb0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/build/building_in_windows_with_visual_studio.md @@ -0,0 +1,93 @@ +[README](../README.md#documentation) > **CUTLASS 3.0: Building on Windows with Visual Studio** + +# Building on Windows with Visual Studio + +CUTLASS 3.2 reintroduces support for the Microsoft Visual Studio compiler on Windows. +Users and developers may build either +in Visual Studio's graphical integrated development environment, +or on the command line with `cmake --build`. + +# Software prerequisites + +1. Windows 10 or 11 + +2. Visual Studio 2019 version 16.11.27, or Visual Studio 2022 + +3. CUDA Toolkit (at least 12.2; earlier 12.x versions may work) + +4. CMake (at least 3.18) + +5. git + +6. Python (at least 3.6) + +Visual Studio must be installed *before* the CUDA Toolkit. +Otherwise, Visual Studio's build system won't know about CUDA. + +# Operating system settings + +By default, Windows restricts the maximum file path length (`MAX_PATH`) to 260 characters. +CUTLASS has many files and directory paths that challenge this requirement. +As a result, CUTLASS is unlikely to build with this default setting. +The choice of source and build directories affect path lengths, +so the kinds of errors and whether they occur may depend on this. +Symptoms may vary, from errors when running `cmake` +(e.g., during the "generating library instances" step) to build failures. + +CUTLASS recommends changing the maximum file path length setting +and rebooting the computer before attempting to clone or build CUTLASS. +Windows 10 (as of version 1607) and 11 permit changing this setting +by making sure that the following registry key exists, +and that its value is set to 1. + +``` +Computer\HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\FileSystem\LongPathsEnabled +``` + +After changing the registry key's value, reboot the computer first +before attempting to clone or build CUTLASS. 
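For example, on a system where one has administrator rights, one possible way to set this value is from an elevated command prompt (shown here only as a sketch; the article linked below describes this and other methods in more detail):

```
reg add "HKLM\SYSTEM\CurrentControlSet\Control\FileSystem" /v LongPathsEnabled /t REG_DWORD /d 1 /f
```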
+ +[This Microsoft help article](https://learn.microsoft.com/en-us/windows/win32/fileio/maximum-file-path-limitation?tabs=registry) +explains different ways to change the registry setting. + +# Limitations + +Currently, it's possible to build examples and tests. +Building the CUTLASS library (e.g., for profiling) with default settings does not currently work, +because Visual Studio's linker cannot handle more than 65535 symbols in a library. +(The symptom of this issue is a LNK1189 linker error.) +The known way to work around this Visual Studio limitation is to disable building CUTLASS's library, +by setting the CMake option `CUTLASS_ENABLE_LIBRARY` to `OFF`. +Another approach may be to limit the number of kernels in the library +by setting the CMake option `CUTLASS_LIBRARY_KERNELS` +so that CUTLASS tries to put fewer kernels in the library. + +# Set up build environment + +1. Run "git bash" to get a familiar command-line interface + +2. Edit `~/.profile` and set the environment variables as needed to access the CUTLASS repository + +3. Clone the CUTLASS repository + +4. Create the `build` subdirectory in the CUTLASS clone directory, and run CMake in it, + specifying whatever CMake options are desired, e.g., + `cmake .. -DCUTLASS_NVCC_ARCHS=90a -DCUTLASS_ENABLE_LIBRARY=OFF` + +Alternate approaches may rely on the CMake GUI and/or Windows' native command line. + +# Building + +A successful CMake run will create a `CUTLASS.sln` Visual Studio "solution" file in the build directory. +One can open this in Visual Studio and build the entire solution or any subset of projects as desired. +It may be necessary to limit maximum build parallelism by setting the appropriate Visual Studio option. + +Alternately, one can run `cmake --build . --config Release -j 4` in the build directory. +Replace 4 with the desired maximum build parallelism. +It's important to put the `--build` option before the period that signifies the build directory. +The `--config` option specifies the kind of build; +`--config Release` builds a Release build, while `--config Debug` builds a Debug build. +Unlike with CMake's Makefile or Ninja generators, +`CMAKE_BUILD_TYPE` has no effect on the Visual Studio generator, +because the Visual Studio generator creates all build configurations. + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/build/building_with_clang_as_host_compiler.md b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/build/building_with_clang_as_host_compiler.md new file mode 100644 index 0000000000000000000000000000000000000000..cde9220690c26397579874a5cf5228d9d485bb2c --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/build/building_with_clang_as_host_compiler.md @@ -0,0 +1,53 @@ +[README](../README.md#documentation) > **CUTLASS 3: Building with Clang as host compiler** + +# Building with Clang as host compiler + +CUTLASS 3.2(.1) reintroduces support for building with +Clang as host compiler, and NVCC as device compiler. +This is NOT the same as building with +Clang as both host and device compiler ("CUDA Clang"). + +# Software prerequisites + +1. Clang (tested with Clang 14) + +2. CUDA Toolkit (tested with 12.2; other versions likely work) + +3. CMake (at least 3.18) + +4. git + +5. Python (at least 3.6) + +Experience with Ubuntu 22.04 LTS is that +clang requires the following packages to be installed. 
+ +```bash +$ sudo apt-get install clang cmake ninja-build pkg-config libgtk-3-dev liblzma-dev libstdc++-12-dev +``` + +A symptom of not installing all needed dependencies +is the following error when attempting to use clang: +`"/usr/bin/ld: cannot find -lstdc++: No such file or directory"`. + +# Running CMake + +The Clang build requires specifying the following three CMake options. + +* `CMAKE_CXX_COMPILER=clang++` +* `CMAKE_CUDA_HOST_COMPILER=clang++` + +* `CMAKE_C_COMPILER=clang` + +This assumes that `clang++` and `clang` are in the user's `PATH`. +Please note that both `CMAKE_CXX_COMPILER` and `CMAKE_C_COMPILER` +must be set, even though CUTLASS is a C++ project, not a C project. + +Users can also specify a particular CUDA Toolkit version +by setting the CMake option `CMAKE_CUDA_COMPILER` +to the path to the `nvcc` executable +that lives in the CUDA Toolkit's directory. For example, +if `${PATH_TO_CUDA_TOOLKIT}` is the CUDA Toolkit directory, +then one can set `CMAKE_CUDA_COMPILER` as follows. + +* `CMAKE_CUDA_COMPILER=${PATH_TO_CUDA_TOOLKIT}/bin/nvcc` diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/code_organization.md b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/code_organization.md new file mode 100644 index 0000000000000000000000000000000000000000..ea196795304540994f930c079caf9e5aa85080cd --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/code_organization.md @@ -0,0 +1,269 @@ +![ALT](/media/images/gemm-hierarchy-with-epilogue-no-labels.png "CUTLASS Code Organization") + +[README](/README.md#documentation) > **Code Organization** + +# CUTLASS Code Organization + +This document describes the layout of the CUTLASS repository. The main components are: + +* **CUTLASS Template Library** - CUDA Templates for Linear Algebra Subroutines and Solvers (header only) +* **CuTe Template Library** - CUTLASS's core vocabulary layout type and associated algebra (header only) +* **CUTLASS Utilities** - Additional templates +* **CUTLASS Instance Library** - instantiations of CUTLASS templates covering the design space +* **CUTLASS Profiler** - CUTLASS Library, Profiler, and Utilities +* **Examples** - SDK examples of CUTLASS Template Library and components +* **Media** - supporting documentation and media content +* **Tests** - test components for CUTLASS Template Library and tools + +## CUTLASS Template Library + +CUDA Templates for Linear Algebra Subroutines and Solvers is a library of CUDA C++ template classes for +performing efficient matrix computations on NVIDIA GPUs. + +Like NVIDIA CUB, the components of CUTLASS are organized hierarchically based on the scope of cooperative +elements. For example, warp-level GEMM components perform a matrix multiply collectively by the +set of threads within a warp. The following figure illustrates each layer. + +Components are designed to be usable by client applications accessing functionailty at each scope. + +CUTLASS Templates are implemented by header files in the following directory structure: + +``` +include/ # Top-level include directory. Client applications should target this path. 
+ cutlass/ # CUDA Templates for Linear Algebra Subroutines and Solvers - headers only + + arch/ # direct exposure of architecture features (including instruction-level GEMMs) + * + gemm/ # code specialized for general matrix product computations + thread/ # thread-level operators + warp/ # warp-level operators + collective/ # 3.x API operators for all threads a tiled mma/copy are built over + threadblock/ # CTA-level operators + kernel/ # CUDA kernel entry points + device/ # launches kernel(s) over a full device + * # scope-agnostic components and basic vocabulary type definitions for GEMM + + layout/ # layout definitions for matrices, tensors, and other mathematical objects in memory + * + + reduction/ # bandwidth-limited reduction kernels that do not fit the "gemm" models + thread/ # thread-level operators + warp/ # warp-level operators + threadblock/ # CTA-level operators + kernel/ # CUDA kernel entry points + device/ # launches kernel(s) over a full device + * # scope-agnostic components and basic vocabulary type definitions + + transform/ # code specialized for layout, type, and domain transformations + thread/ # thread-level operators + warp/ # warp-level operators + threadblock/ # CTA-level operators + kernel/ # CUDA kernel entry points + device/ # launches kernel(s) over a full device + * # scope-agnostic components and basic vocabulary type definitions + + util/ # miscellaneous CUTLASS components + * + * # core vocabulary types and fundamental arithmetic operators + + cute / # CuTe Layout, layout algebra, MMA/Copy atoms, tiled MMA/Copy + algorithm/ # Definitions of core operations such as copy, gemm, and operations on cute::tuples + arch/ # Bare bones PTX wrapper structs for copy and math instructions + atom/ # Meta-information either link to or built from arch/ operators + mma_atom.hpp # cute::Mma_Atom and cute::TiledMma + copy_atom.hpp # cute::Copy_Atom and cute::TiledCopy + *sm*.hpp # Arch specific meta-information for copy and math operations + container/ # Core container types used across CuTe, namely, cute::tuple + numeric/ # CuTe's internal numerics implementation + * # Core library types such as Shape, Stride, Layout, Tensor, and associated operations +``` + +See [Programming Guidelines](/media/docs/programming_guidelines.md) for further details about +conventions and design patterns used throughout CUTLASS. + +## CuTe + +CuTe is a collection of C++ CUDA template abstractions for defining and operating on hierarchically multidimensional layouts of threads and data. CuTe provides `Layout` and `Tensor` objects that compactly packages the type, shape, memory space, and layout of data, while performing the complicated indexing for the user. This lets programmers focus on the logical descriptions of their algorithms while CuTe does the mechanical bookkeeping for them. With these tools, we can quickly design, implement, and modify all dense linear algebra operations. More documentation +for CuTe can be found in [`/media/docs/cute/`](/media/docs/cute/). + +## Tools + +The `tools/` directory contains clients of the CUTLASS Template library and includes the following. + +## CUTLASS Instance Library + +The CUTLASS Instance Library contains instantiations of the above CUTLASS templates covering supported configurations, +data types, block structure, and tile sizes. These instantiations are procedurally generated using a set of +scripts to span the design space. 
+ +``` +tools/ + library/ # static/dynamic library containing all kernel instantiations of interest + # (with some build-level filter switches to compile specific subsets) + + include/ + cutlass/ + library/ # header files for CUTLASS Deliverables Library (in cutlass::library:: namespace) + + handle.h # implements a host-side API for launching kernels, similar to cuBLAS + library.h # defines enums and structs to describe the tiled structure of operator instances + manifest.h # collection of all instances + + src/ + +python/ + cutlass_library/ # scripts to procedurally generate CUTLASS template instances + + gemm_operations.py + library.py + generator.py # entry point of procedural generation scripts - invoked by cmake + manifest.py +``` + +When CMake is executed, the CUTLASS Instance Library generator scripts are executed to construct a set of +instantiations in `build/tools/library/generated/`. + +### CUTLASS Profiler + +The CUTLASS Profiler is designed to load the CUTLASS Instance Library and execute all operations contained therein. +This command-line driven application constructs an execution environment for evaluating functionality and performance. +It is implemented in +``` +tools/ + profiler/ +``` + +and may be built as follows. +``` +$ make cutlass_profiler -j +``` + +[Further details about the CUTLASS Profiler are described here.](/media/docs/profiler.md) + +### CUTLASS Utilities + +`tools/util/` defines a companion library of headers and sources that support the CUTLASS test programs, examples, and other client applications. Its structure is as follows: + +``` +tools/ + util/ + include/ + cutlass/ + util/ # CUTLASS Utility companion library + + reference/ # functional reference implementation of CUTLASS operators + # (minimal consideration for performance) + + detail/ + * + + device/ # device-side reference implementations of CUTLASS operators + thread/ + kernel/ + * + host/ # host-side reference implementations of CUTLASS operators + * + * +``` + +[More details about CUTLASS Utilities may be found here.](/media/docs/utilities.md) + + +## Examples + +To demonstrate CUTLASS components, several SDK examples are implemented in `examples/`. + +CUTLASS SDK examples apply CUTLASS templates to implement basic computations. 
+ +``` +examples/ + 00_basic_gemm/ # launches a basic GEMM with single precision inputs and outputs + + 01_cutlass_utilities/ # demonstrates CUTLASS Utilities for allocating and initializing tensors + + 02_dump_reg_smem/ # debugging utilities for printing register and shared memory contents + + 03_visualize_layout/ # utility for visualizing all layout functions in CUTLASS + + 04_tile_iterator/ # example demonstrating an iterator over tiles in memory + + 05_batched_gemm/ # example demonstrating CUTLASS's batched strided GEMM operation + + 06_splitK_gemm/ # exmaple demonstrating CUTLASS's Split-K parallel reduction kernel + + 07_volta_tensorop_gemm/ # example demonstrating mixed precision GEMM using Volta Tensor Cores + + 08_turing_tensorop_gemm/ # example demonstrating integer GEMM using Turing Tensor Cores + + 10_planar_complex/ # example demonstrating planar complex GEMM kernels + + 11_planar_complex_array/ # example demonstrating planar complex kernels with batch-specific problem sizes + + 12_gemm_bias_relu/ # example demonstrating GEMM fused with bias and relu activation function + + 13_fused_two_gemms/ # example demonstrating two GEMMs fused into one kernel +``` + +## Media + +This directory contains documentation, images, and performance result data which accompanies the CUTLASS library and components. + +## Tests + +Test programs for CUTLASS. Tests are organized hierarchically, mirroring the organization of source files. +``` +test/ # unit tests for CUTLASS Template Library + unit/ + arch/ + core/ + gemm/ + device/ + kernel/ + thread/ + threadblock/ + warp/ + reduction/ + kernel/ + thread/ + transform/ + threadblock/ + * +``` +Tests can be built and run at the top level scope by invoking `make test_unit` or by building +and explicitly executing each individual target, e.g. `cutlass_test_unit_gemm_device`. + +Tests are configured to specify appropriate GTest filter strings to avoid running except on +architectures where they are expected to pass. Thus, no tests should fail. The actual number +of tests run may vary over time as more are added. + +# Copyright + +Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +SPDX-License-Identifier: BSD-3-Clause + +``` + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +``` diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/cute/00_quickstart.md b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/cute/00_quickstart.md new file mode 100644 index 0000000000000000000000000000000000000000..a9c35f1be3d27df35946a1dd4a82cb07bfc7765b --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/cute/00_quickstart.md @@ -0,0 +1,78 @@ +# Getting Started With CuTe + +CuTe is a collection of C++ CUDA template abstractions for defining and operating on hierarchically multidimensional layouts of threads and data. CuTe provides `Layout` and `Tensor` objects that compactly packages the type, shape, memory space, and layout of data, while performing the complicated indexing for the user. This lets programmers focus on the logical descriptions of their algorithms while CuTe does the mechanical bookkeeping for them. With these tools, we can quickly design, implement, and modify all dense linear algebra operations. + +The core abstraction of CuTe are the hierarchically multidimensional layouts which can be composed with data arrays to represent tensors. The representation of layouts is powerful enough to represent nearly everything we need to implement efficient dense linear algebra. Layouts can also be combined and manipulated via functional composition, on which we build a large set of common operations such as tiling and partitioning. + +## System Requirements + +CuTe shares CUTLASS 3.0's software requirements, +including NVCC with a C++17 host compiler. + +## Knowledge prerequisites + +CuTe is a CUDA C++ library. It requires C++17 +(the revision of the C++ Standard that was released in 2017). + +Throughout this tutorial, we assume intermediate C++ experience. +For example, we assume that readers know +how to read and write templated functions and classes, and +how to use the `auto` keyword to deduce a function's return type. +We will be gentle with C++ and explain some things +that you might already know. + +We also assume intermediate CUDA experience. +For example, readers must know +the difference between device and host code, +and how to launch kernels. + +## Building Tests and Examples + +CuTe's tests and examples build and run as part of CUTLASS's normal build process. +CuTe's unit tests live in the [`test/unit/cute`](../../../test/unit/cute) subdirectory. +Its examples live in the [`examples/cute`](../../../examples/cute) subdirectory. + +## Library Organization + +CuTe is a header-only C++ library, so there is no source code that needs building. Library headers are contained within the top level [`include/cute`](../../../include/cute) directory, with components of the library grouped by directories that represent their semantics. 
+ +| Directory | Contents | +|------------------------|------------------------| +| [`include/cute`](../../../include/cute) | Each header in the top level corresponds to one of the fundamental building blocks of CuTe, such as [`Layout`](../../../include/cute/layout.hpp) or [`Tensor`](../../../include/cute/tensor.hpp). | +| [`include/cute/container`](../../../include/cute/container) | Implementations of STL-like container objects, such as tuple, array, aligned array, and array views. | +| [`include/cute/numeric`](../../../include/cute/numeric) | Templates that handle nonstandard floating-point types, unsigned integers, complex numbers, and integer sequence - like fundamental numeric data types. | +| [`include/cute/algorithm`](../../../include/cute/algorithm) | Implementations of utility algorithms such as copy, fill, and clear that automatically leverage architecture-specific features if available. | +| [`include/cute/arch`](../../../include/cute/arch) | Wrappers for architecture-specific matrix-matrix multiply and copy instructions. | +| [`include/cute/atom`](../../../include/cute/atom) | Meta-information for instructions in `arch` and utilities like partitioning and tiling. + +## Tutorial + +This directory contains a CuTe tutorial in Markdown format. +The file +[`0x_gemm_tutorial.md`](./0x_gemm_tutorial.md) +explains how to implement dense matrix-matrix multiply using CuTe components. +It gives a broad overview of CuTe and thus would be a good place to start. + +Other files in this directory discuss specific parts of CuTe. + +* [`01_layout.md`](./01_layout.md) describes `Layout`, CuTe's core abstraction. + +* [`02_layout_operations.md`](./02_layout_operations.md) describes more advanced `Layout` operations and the CuTe layout algebra. + +* [`03_tensor.md`](./03_tensor.md) describes `Tensor`, + a multidimensional array abstraction which composes `Layout` + with an array of data. + +* [`04_algorithms.md`](./04_algorithms.md) summarizes CuTe's + generic algorithms that operate on `Tensor`s. + +* [`0t_mma_atom.md`](./0t_mma_atom.md) demonstrates CuTe's meta-information and interface to our GPUs' + architecture-specific Matrix Multiply-Accumulate (MMA) instructions. + +* [`0x_gemm_tutorial.md`](./0x_gemm_tutorial.md) walks through building a GEMM from scratch using CuTe. + +* [`0y_predication.md`](./0y_predication.md) explains what to do + if a tiling doesn't fit evenly into a matrix. + +* [`0z_tma_tensors.md`](./0z_tma_tensors.md) summarizes + how CuTe supports TMA loads and stores. diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/cute/01_layout.md b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/cute/01_layout.md new file mode 100644 index 0000000000000000000000000000000000000000..c1a25ac1bcfaa6ae01591e3f837b06cc19955e49 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/cute/01_layout.md @@ -0,0 +1,266 @@ +# CuTe Layouts + +## Layout + +This document describes `Layout`, CuTe's core abstraction. +A `Layout` maps from a logical coordinate space +to an index space. + +`Layout`s present a common interface to multidimensional array access +that abstracts away the details of how the array's elements are organized in memory. +This lets users write algorithms that access multidimensional arrays generically, +so that layouts can change, without users' code needing to change. + +CuTe also provides an "algebra of `Layout`s." 
+`Layout`s can be combined and manipulated +to construct more complicated layouts +and to partition them across other layouts. +This can help users do things like partition layouts of data over layouts of threads. + +## Layouts and Tensors + +Any of the `Layout`s discussed in this section can be composed with data -- e.g., a pointer or an array -- to create a `Tensor`. +The `Layout`'s logical coordinate space represents the logical "shape" of the data, +e.g., the modes of the `Tensor` and their extents. +The `Layout` maps a logical coordinate into an index, +which is an offset to be used to index into the array of data. + +For details on `Tensor`, please refer to the +[`Tensor` section of the tutorial](./03_tensor.md). + +## Shapes and Strides + +A `Layout` is a pair of `Shape` and `Stride`. +Both `Shape` and `Stride` are `IntTuple` types. + +### IntTuple + +An `IntTuple` is defined recursively as either a single integer, or a tuple of `IntTuple`s. +This means that `IntTuple`s can be arbitrarily nested. +Operations defined on `IntTuple`s include the following. + +* `get(IntTuple)`: The `I`th element of the `IntTuple`. For an `IntTuple` consisting of a single integer, `get<0>` is just that integer. + +* `rank(IntTuple)`: The number of elements in an `IntTuple`. A single integer has rank 1, and a tuple has rank `tuple_size`. + +* `depth(IntTuple)`: The number of hierarchical `IntTuple`s. A single integer has depth 0, a tuple of integers has depth 1, a tuple that contains a tuple of integers has depth 2, etc. + +* `size(IntTuple)`: The product of all elements of the `IntTuple`. + +We write `IntTuple`s with parenthesis to denote the hierarchy. For example, `6`, `(2)`, `(4,3)`, `(3,(6,2),8)` are all `IntTuple`s. + +## Layout + +A `Layout` is then a pair of `IntTuple`s. The first element defines the abstract *shape* of the `Layout`, and the second element defines the *strides*, which map from coordinates within the shape to the index space. + +Since a `Layout` is just a pair of `IntTuple`s, we can define operations on `Layout`s analogous to those defined on `IntTuple`. + +* `get(Layout)`: The `I`th sub-layout of the `Layout`. + +* `rank(Layout)`: The number of modes in a `Layout`. + +* `depth(Layout)`: The number of hierarchical `Layout`s. A single integer has depth 0, a tuple of integers has depth 1, a tuple that contains a tuple of integers has depth 2, etc. + +* `shape(Layout)`: The shape of the `Layout`. + +* `stride(Layout)`: The stride of the `Layout`. + +* `size(Layout)`: The logical extent of the `Layout`. Equivalent to `size(shape(Layout))`. + +### Hierarchical access functions + +`IntTuple`s and thus `Layout`s can be arbitrarily nested. +For convenience, we define versions of some of the above functions +that take a sequence of integers, instead of just one integer. +This makes it possible to access elements +inside of nested `IntTuple` or `Layout`. +For example, we permit `get(x)`, where `I...` here +and throughout this section is a "C++ parameter pack" +that denotes zero or more (integer) template arguments. +That is, `get(x)` is equivalent to +`get(` $\dots$ `(get(get(x)))` $\dots$ `))`, +where the ellipses are pseudocode and not actual C++ syntax. +These hierarchical access functions include the following. + +* `rank(x) := rank(get(x))`. The rank of the `I...`th element of `x`. + +* `depth(x) := depth(get(x))`. The depth of the `I...`th element of `x`. + +* `size(x) := size(get(x))`. The size of the `I...`th element of `x`. 
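To make these accessors concrete, here is a small sketch applying them to the nested `IntTuple` `(3,(6,2),8)` mentioned above (the function name `inspect_shape` is illustrative only, and the comments describe the values rather than the exact printed formatting):

```c++
#include <cute/tensor.hpp>
using namespace cute;

void inspect_shape()
{
  // The nested IntTuple (3,(6,2),8), built as a Shape
  auto s = make_shape(Int<3>{}, make_shape(Int<6>{}, Int<2>{}), Int<8>{});

  print(rank(s));     // rank 3  : three top-level elements
  print(depth(s));    // depth 2 : one extra level of nesting
  print(size(s));     // size 288 = 3 * 6 * 2 * 8
  print(get<1>(s));   // the nested element (6,2)
  print(size<1>(s));  // size 12  : product of the nested element
  print(rank<1>(s));  // rank 2  : the nested element has two entries
}
```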
+ +### Vector examples + +We define a vector as any `Shape` and `Stride` pair with `rank == 1`. +For example, the `Layout` + +``` +Shape: (8) +Stride: (1) +``` + +defines a contiguous 8-element vector. +For a vector with the same Shape but a Stride of `(2)`, +the interpretation is that the eight elements +are stored at positions 0, 2, 4, $\dots$, 14. + +By the above definition, we *also* interpret + +``` +Shape: ((4,2)) +Stride: ((1,4)) +``` + +as a vector, since its shape is rank 1. The inner shape describes a 4x2 layout of data in column-major order, but the extra pair of parenthesis suggest we can interpret those two modes as a single 1-D 8-element vector instead. Due to the strides, the elements are also contiguous. + +### Matrix examples + +Generalizing, we define a matrix as any `Shape` and `Stride` pair with rank 2. For example, + +``` +Shape: (4,2) +Stride: (1,4) + 0 4 + 1 5 + 2 6 + 3 7 +``` + +is a 4x2 column-major matrix, and + +``` +Shape: (4,2) +Stride: (2,1) + 0 1 + 2 3 + 4 5 + 6 7 +``` + +is a 4x2 row-major matrix. + +Each of the modes of the matrix can also be split into *multi-indices* like the vector example. +This lets us express more layouts beyond just row major and column major. For example, + +``` +Shape: ((2,2),2) +Stride: ((4,1),2) + 0 2 + 4 6 + 1 3 + 5 7 +``` + +is also logically 4x2, with a stride of 2 across the rows but a multi-stride down the columns. +Since this layout is logically 4x2, +like the column-major and row-major examples above, +we can _still_ use 2-D coordinates to index into it. + +## Constructing a `Layout` + +A `Layout` can be constructed in many different ways. +It can include any combination of compile-time (static) integers +or run-time (dynamic) integers. + +```c++ +auto layout_8s = make_layout(Int<8>{}); +auto layout_8d = make_layout(8); + +auto layout_2sx4s = make_layout(make_shape(Int<2>{},Int<4>{})); +auto layout_2sx4d = make_layout(make_shape(Int<2>{},4)); + +auto layout_2x4 = make_layout(make_shape (2, make_shape (2,2)), + make_stride(4, make_stride(2,1))); +``` + +The `make_layout` function returns a `Layout`. +It deduces the returned `Layout`'s template arguments from the function's arguments. +Similarly, the `make_shape` and `make_stride` functions +return a `Shape` resp. `Stride`. +CuTe often uses these `make_*` functions, +because constructor template argument deduction (CTAD) +does not work for `cute::tuple` as it works for `std::tuple`. + +## Using a `Layout` + +The fundamental use of a `Layout` is to map between logical coordinate space(s) and an index space. For example, to print an arbitrary rank-2 layout, we can write the function + +```c++ +template +void print2D(Layout const& layout) +{ + for (int m = 0; m < size<0>(layout); ++m) { + for (int n = 0; n < size<1>(layout); ++n) { + printf("%3d ", layout(m,n)); + } + printf("\n"); + } +} +``` + +which produces the following output for the above examples. + +``` +> print2D(layout_2sx4s) + 0 2 4 6 + 1 3 5 7 +> print2D(layout_2sx4d) + 0 2 4 6 + 1 3 5 7 +> print2D(layout_2x4) + 0 2 1 3 + 4 6 5 7 +``` + +The multi-indices within the `layout_2x4` example are handled as expected and interpreted as a rank-2 layout. + +Note that for `layout_2x4`, we're using a 1-D coordinate for a 2-D multi-index in the second mode. In fact, we can generalize this and treat all of the above layouts as 1-D layouts. 
For instance, the following `print1D` function + +```c++ +template +void print1D(Layout const& layout) +{ + for (int i = 0; i < size(layout); ++i) { + printf("%3d ", layout(i)); + } +} +``` + +produces the following output for the above examples. + +``` +> print1D(layout_8s) + 0 1 2 3 4 5 6 7 +> print1D(layout_8d) + 0 1 2 3 4 5 6 7 +> print1D(layout_2sx4s) + 0 1 2 3 4 5 6 7 +> print1D(layout_2sx4d) + 0 1 2 3 4 5 6 7 +> print1D(layout_2x4) + 0 4 2 6 1 5 3 7 +``` + +This shows explicitly that all of the layouts are simply folded views of an 8-element array. + +## Summary + +* The `Shape` of a `Layout` defines its coordinate space(s). + + * Every `Layout` has a 1-D coordinate space. + This can be used to iterate in a "generalized-column-major" order. + + * Every `Layout` has a R-D coordinate space, + where R is the rank of the layout. + These spaces are ordered _colexicographically_ + (reading right to left, instead of "lexicographically," + which reads left to right). + The enumeration of that order + corresponds to the 1-D coordinates above. + + * Every `Layout` has an h-D coordinate space where h is "hierarchical." These are ordered colexicographically and the enumeration of that order corresponds to the 1-D coordinates above. An h-D coordinate is congruent to the `Shape` so that each element of the coordinate has a corresponding element of the `Shape`. + +* The `Stride` of a `Layout` maps coordinates to indices. + + * In general, this could be any function from 1-D coordinates (integers) to indices (integers). + + * In `CuTe` we use an inner product of the h-D coordinates with the `Stride` elements. diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/cute/02_layout_operations.md b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/cute/02_layout_operations.md new file mode 100644 index 0000000000000000000000000000000000000000..7860cb7db41bc28454642f2947c3a3630b198cc1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/cute/02_layout_operations.md @@ -0,0 +1,833 @@ +# CuTe Layout Operations + +CuTe provides an "algebra of `Layout`s." +`Layout`s can be combined and manipulated +to construct more complicated `Layout`s. +This includes tiling and partitioning `Layout`s across other `Layout`s. +In this section, we explain some of these core operations in detail. + +## How do I print CuTe objects on host or device? + +CuTe comes with different ways to print CuTe objects. +You can print human-readable text, +or you can print LaTeX commands for generating +a beautifully formatted and colored table +describing the CuTe object. +Both of these can be helpful for reasoning about or debugging +layouts, copy atoms, or matrix multiply atoms +(don't worry, we'll explain all of these things in this tutorial). + +CuTe's print functions work on either host or device. +Note that on device, printing is expensive. +Even just leaving print code in place on device, +even if it is never called +(e.g., printing in an `if` branch that is not taken at run time), +may generate slower code. +Thus, be sure to remove code that prints on device after debugging. + +The following code examples assume that you have a +`using namespace cute;` statement in scope. + +### Printing human-readable text + +The `cute::print` function has overloads for almost all CuTe types, including Pointers, Layout, Shape, Stride, and Tensors. When in doubt, try calling `print` on it. 
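For example, here is a small host-side sketch (the printed representations shown in the comments are approximate and may differ between CuTe versions):

```c++
auto shape  = make_shape(Int<4>{}, 8);           // mixed static/dynamic shape
auto layout = make_layout(shape, GenRowMajor{}); // row-major strides
print(shape);  print("\n");                      // prints something like (_4,8)
print(layout); print("\n");                      // prints something like (_4,8):(8,_1)
```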
You might also only want to print on thread 0 of each thread block, or block 0 of the grid. The `thread0()` function returns true only for global thread 0 of the kernel. A typical idiom for printing CuTe objects to print only on thread 0 of block 0. + +```c++ +if (thread0()) { + print(some_cute_object); +} +``` + +Some algorithms do different things on different threads or blocks, +so you might sometimes need to print on threads or blocks other than zero. +The header file +[`cute/util/debug.hpp`](../../../include/cute/util/debug.hpp), +among other utilities, +includes the function `bool thread(int tid, int bid)` +that returns `true` if running on thread `tid` and block `bid`. + +Some CuTe types have special printing functions that use a different output format. +For example, `print_layout` can display a rank-2 layout in a table +(using plain text formatting). +It has an overload taking a rank-2 matrix layout and a thread layout, +that displays a table with the mapping between threads and values. + +### Printing LaTeX output + +The `cute::print_latex` function works like `cute::print`, +but prints LaTeX commands that you can use +to generate a nicely formatted and colored table. + +## Fundamental types + +### Layout and its components + +This directory includes +[an overview of CuTe's fundamental types for describing layouts](./01_layout.md). + +#### Tuple + +CuTe starts with a Tuple, which is a finite ordered list of zero or more elements. +In C++, we identify a Tuple with the +[`cute::tuple` class](../../../include/cute/container/tuple.hpp). +`cute::tuple` behaves like `std::tuple`, but it works on device or host, +and it imposes restrictions on its template arguments for performance and simplicity. + + +#### IntTuple + +CuTe then defines an IntTuple as either an integer, or a Tuple of IntTuple. +This recursive definition lets us build arbitrarily nested layouts. +In C++, we identify an IntTuple with [`IntTuple`](../../../include/cute/int_tuple.hpp), +which is just an alias of `cute::tuple`. +Any of the following are thus valid template arguments of IntTuple. + +1. "Run-time integers" (or "static integers") + are just ordinary integral types like `int` or `size_t`. + +2. "Compile-time integers" include `std::integral_constant` + or subclasses of it that CuTe defines, + such as `Int` (see below). + These types all have in common + that the value is encoded in the type itself + (as a public `static constexpr value` member). + CuTe defines aliases `_1`, `_2`, `_3` etc. + to the types `Int<1>`, `Int<2>`, `Int<3>` etc. + +3. `IntTuple` with any valid template arguments. + +CuTe reuses IntTuple for many different things, +including Shape, Stride, Step, and Coord +(see [`include/cute/layout.hpp`](../../../include/cute/layout.hpp)). +In C++, Shape, Stride, Step, and Coord are all aliases for IntTuple. + +### Layout + +A Layout is a tuple of (Shape, Stride). +Semantically, it implements a mapping from +a "logical" Shape-shaped (multidimensional) index, +to a "physical" 1-D index into an array. +Here is an example of a 2 x 3 array with static strides (3, 1). + +```c++ +Layout layout = make_layout(make_shape (_2{}, _3{}), + make_stride(_3{}, _1{})); +print_layout(layout); +for (int i = 0; i < size(layout); ++i) { + print(layout(i)); + print(", "); +} +print("\n"); +print(layout(1, 1)); +print("\n"); +``` + +This code produces the following text output. 
+ +```text +(_2,_3):(_3,_1) + 0 1 2 + +---+---+---+ + 0 | 0 | 1 | 2 | + +---+---+---+ + 1 | 3 | 4 | 5 | + +---+---+---+ +0, 3, 1, 4, 2, 5, +4 +``` + +`print(layout(1, 1))` prints the mapping of +the logical 2-D coordinate (1,1) to the 1-D index, which is 4. +You can see that from the table, +which shows the left logical index as the "row," +and the right logical index as the "column." + +### Underscore (`_`) + +An Underscore is a special type used for array slices. The underscore punctuation `_` is a constant instance of Underscore. It acts like `:` (the colon punctuation) in Python or Fortran array slices. See [`include/cute/underscore.hpp`](../../../include/cute/underscore.hpp). + +### Tile + +"A Tile is not a Layout, it's a tuple of Layouts or Tiles or Underscores." +See [`include/cute/tile.hpp`](../../../include/cute/tile.hpp). + +The algebraic layout operations discussed below are defined on `Layout`s, but `Tile` allows these operations to recurse and to be applied to sublayouts or particular modes of a given Layout. These are referred to as by-mode operations. + +See the section on "Logical Divide" to see an example of using `Tile` to extract portions of a row-mode and portions of a column-mode independently. + +## Layout definitions and operations + +### Layouts are functions from integers (logical 1-D coordinate) to integers (1-D index) + +The `for` loop in the above print example shows how CuTe identifies 1-D coordinates with a column-major layout of logical 2-D coordinates. Iterating from `i = 0` to `size(layout)` (which is 6), and indexing into our layout with the single integer coordinate `i`, traverses the layout in column-major fashion, even though this is a row-major layout. You can see this from the output of the `for` loop (0, 3, 1, 4, 2, 5). CuTe calls this index `i` a "1-D coordinate," versus the "natural coordinate," which would be the logical 2-D coordinate. + +If you're familiar with the C++23 feature `mdspan`, +this is an important difference between +`mdspan` layout mappings and CuTe `Layout`s. +`mdspan` layout mappings are *one way*: +they always take a multidimensional logical coordinate, +and they return an integer offset. +Depending on the strides, +the offset may skip over elements of the physical 1-D array. +Thus, `mdspan`'s offset does NOT mean the same thing as +the 1-D logical coordinate `i` in the `for` loop above. +You can iterate correctly over any CuTe `Layout` +by using the 1-D logical coordinate. +`mdspan` doesn't have an idea of a 1-D logical coordinate. + +### Rank, depth, size, cosize + +*Rank*: the tuple size of the layout's shape. + +*Depth*: the depth of the layout's shape. A single integer has depth 0. A tuple has depth 1 + the max depth of its components. + +*Size*: Size of the shape; size of the domain of the function. This is the product of all extents in the layout's shape. + +*Cosize*: Size of the function's codomain (not necessarily the range); for a layout A, A(size(A) - 1) + 1. (Here, we use size(A) - 1 as a 1-D logical coordinate input.) + +### Layout compatibility + +We say that layouts A and B are *compatible* if their shapes are compatible. Shape A is compatible with shape B if any natural coordinate of A is also a valid coordinate for B. + +### Flatten + +The `flatten` operation "un-nests" a potentially nested Layout. 
For example, + +```c++ +Layout layout = Layout, _1>, + Stride, _0>>{}; +Layout flat_layout = flatten(layout); +``` + +results in `flat_layout` having the following type + +```text +Layout, Stride<_3, _1, _0>> +``` + +and + +```c++ +Layout layout = Layout>, + Stride<_4, Stride<_1, _16>>>{}; +Layout flat_layout = flatten(layout); +``` + +results in `flat_layout` having the following type + +```text +Layout, Stride<_4, _1, _16>> +``` + +Hierarchical Layouts and flattening let us reinterpret tensors in place as matrices, matrices as vectors, vectors as matrices, etc. This lets us implement arbitrary tensor contractions as batched matrix multiply, by combining the contraction modes into a single mode, and combining the A, B, C, and "batch" modes as needed to reach the desired form. + +### Coalesce + +The `coalesce` operation first flattens the layout, then combines all the modes that are possible to combine, starting with mode 0 (the leftmost mode) and moving right. If all the modes can be combined, then this results in a 1-D layout expressing what array elements the original layout accesses. + +For example, + +```text +layout: (_2,(_1,_6)):(_1,(_6,_2)) +coalesce(layout): _12:_1 +``` + +What does it mean to "combine" modes? In the above example, the flattened layout is (2, 1, 6) : (1, 6, 2). + +1. If we look at the leftmost two modes, this is just a vector of length 2 and stride 1. The middle mode has extent 1, so the corresponding stride 6 would not be observed anyway. This leaves us with (2, 6) : (1, 2). + +2. The intermediate result (2, 6) : (1, 2) is just a 2 x 6 column-major matrix, which can be coalesced into a vector of length 12 and stride 1. + +More formally, "combining all the modes" means a left fold, where the binary operation that combines two modes has three cases. + +1. If the leftmost layout is s1:d1, and the next layout is 1:d0, then combine into s1:d1. This generalizes Step 1 above. If a mode has extent 1, we can't observe its stride, so we can skip the mode. + +2. If the leftmost layout is 1:d1, and the next layout is s0:d0, then combine into s0:d0. Again, if a mode has extent 1, we can't observe its stride, so we can skip the mode. + +3. If the leftmost layout is s1:d1, and the next layout is s0 : s1*d1, then combine into s0 * s1 : d1. This generalizes Step 2 above. One can call this "noticing a column-major layout sequence." + +That's it! For example, the result of coalescing the row-major layout (2, 2) : (2, 1) is (2, 2) : (2, 1), the same layout, because none of the above three cases applies. + +### Complement + +#### Definition + +The complement B of a layout A with respect to an integer M satisfies the following properties. + +1. $A$ and $B$ are *disjoint*: $A(x) \neq B(x)$ for all $x \neq 0$ in the domain of $A$. + +2. B is *ordered*: $B(x-1) \lt B(x)$ for all $x$ in $\{0, 1, \dots, size(B) - 1\}$. + +3. B is *bounded* by M: $size(B) \geq M / size(A)$, and $cosize(B) \leq floor(M / cosize(A)) * cosize(A)$. + +Regarding disjointness: we need to specify $x \neq 0$ because CuTe layouts are linear. That is, if the domain is nonempty, the range always contains zero. + +Regarding the ordered property: CuTe layouts are hierarchically strided, so this implies that if size(B) is nonzero, then the strides of B are all positive. + +#### Examples + +complement(4:1, 24) is 6:4. + +1. The result is disjoint of 4:1, so it must have a stride of at least 4 (since it includes 0, but must skip over 1, 2, 3). + +2. The size of the result is $\geq 24 / 4 = 6$. 
(This plus Step (1) means that the cosize is at least 24.) + +3. The cosize of the result is $\leq (24 / 4) * 4 = 24$. (This plus Step (2) means that the cosize is exactly 24.) + +4. The only (1-D) layout with size 6 and cosize 24 is 6:4. + +complement(6:4, 24) is 4:1. + +1. 4:1 is disjoint of 6:4, but so is s:d + for any s > 0 and d > 20. + +2. The size of the result is $\geq 24 / 6 = 4$. + +3. The cosize of the result is $\leq (24 / 21) * 21 = 21$. + +4. The stride cannot be greater than 20 + (else (2) would contradict (3)), + so it must be less than 4. + +5. This leaves 4:1 by elimination. + +### Composition + +Layouts are functions, so composition of layouts is just composition of functions. The composition $A \circ B$ means "apply the layout B first, then treat the result as a 1-D logical coordinate input to the layout A, and apply A to it." Very often, this composition can be represented as another Layout. + +#### Rules for computing composition + +Both humans and CuTe compute composition using the following rules. + +1. $A \circ B$ has a shape that is compatible with B. In function composition, the rightmost function defines the domain. For `Layout`s this means that any valid coordinate for $B$ can also be used as a coordinate for $A \circ B$. + +2. Concatenation: A layout can be expressed as the concatenation of its sublayouts. We denote concatenation with parentheses: $B = (B_0,B_1,...)$. The CuTe function `make_layout`, when given zero or more `Layout`s, concatenates them. + +3. Composition is (left-)distributive with concatenation: $A \circ B = A \circ (B_0, B_1, ...) = (A \circ B_0, A \circ B_1, ...)$. + +4. "Base case": For layouts $A = a : b$ and $B = c : d$ with integral shape and stride, $A \circ B = R = c : (b * d)$. + +5. By-mode composition: Let $\langle B, C \rangle$ (angle brackets, not parentheses) + denote a tuple of two layouts B and C, not their concatenation. Let $A = (A_0, A_1)$. + Then, $A \circ \langle B, C \rangle = (A_0, A_1) \circ \langle B, C \rangle = (A_0 \circ B, A_1 \circ C)$. + This allows the application of composition independently to sublayouts of $A$. + +#### Examples: Reshape a vector into a matrix + +This section gives two composition examples. Both start with a vector with layout $20:2$ (that is, the vector has 20 elements, and the stride between each is 2). They compose this vector with a 4 x 5 matrix layout. This effectively "reshapes" the vector in place into a matrix. + +##### Example 1 + +$20:2 \circ (4,5) : (1,4)$. + +This describes interpreting the vector $20:2$ +as a 4 x 5 column-major matrix. + +The resulting layout has shape $(4,5)$, +because in function composition, +the rightmost function defines the domain. +What are the strides? + +1. A layout can be expressed as the concatenation of its sublayouts, + so $(4,5) : (1,4)$ is $(4:1, 5:4)$. + +2. Composition is distributive, so + $20:2 \circ (4:1, 5:4)$ is $(20:2 \circ 4:1, 20:2 \circ 5:4)$. + +3. $20:2 \circ 4:1$ has shape 4 (rightmost function defines the domain) + and stride $2 = 2 \cdot 1$. + +4. $20:2 \circ 5:4$ has shape 5 and stride $8 = 2 \cdot 4$. + +5. Result: (4:2, 5:8), which by concatenation is (4,5) : (2,8). + +#### Example 2 + +$20:2 \circ (4,5) : (5,1)$. + +This describes interpreting the vector 20:2 +as a 4 x 5 row-major matrix. + +The resulting layout has shape $(4,5)$, just as before. What are the strides? + +1. By deconcatenation, $(4,5) : (5,1)$ is $(4:5, 5:1)$. + +2. Composition is distributive, so $20:2 \circ (4:5, 5:1)$ is $(20:2 \circ 4:5, 20:2 \circ 5:1)$. 
+ +3. $20:2 \circ 4:5$ has shape $4$ and stride $10 = 2 \cdot 5$. + +4. $20:2 \circ 5:1$ has shape $5$ and stride $2 = 2 \cdot 1$. + +5. Result: (4:10, 5:2), which by concatenation is (4,5) : (10,2). + +#### Example: Reshape a matrix into another matrix + +The composition $((20,2):(16,4) \circ (4,5):(1,4))$ +expresses reshaping the matrix with layout (20,2):(16:4), +into a 4 x 5 matrix in a column-major way. + +1. By deconcatenation, $(4,5) : (1,4)$ is $(4:1, 5:4)$. + +2. Composition is distributive, so $(20,2):(16,4) \circ (4:1, 5:4)$ is $((20,2):(16,4) \circ 4:1, (20,2):(16,4) \circ 5:4)$. + +3. $(20,2):(16,4) \circ 4:1$ has shape $4$ and stride $16$. (4:1 expresses picking the first 4 consecutive elements of (20,2):(16,4). These elements run down the 0th column (leftmost mode) of the layout, whose stride is 16.) + +4. $(20,2):(16,4) \circ 5:4$ has shape $5$ and stride $64 = 4 \cdot 16$. + +5. Result: $(4:16, 5:64)$, which by concatenation is $(4,5) : (16,64)$. + +We get exactly this result with CuTe +if we use compile-time shapes and strides. +The following C++ code prints `(_4,_5):(_16,_64).` + +```c++ +using namespace cute; +auto a = make_layout(make_shape(Int<20>{}, _2{}), make_stride(_16{}, _4{})); +auto b = make_layout(make_shape( _4{}, _5{}), make_stride( _1{}, _4{})); +auto c = composition(a, b); +printf("\n"); +print(c); +``` + +Results may _look_ different (but are the same mathematically) +if we use run-time integers. +The following C++ code prints `((4,1),(5,1)):((16,4),(64,4)).` + +```c++ +using namespace cute; +auto a = make_layout(make_shape(20, 2), make_stride(16, 4)); +auto b = make_layout(make_shape( 4, 5), make_stride( 1, 4)); +auto c = composition(a, b); +printf("\n"); +print(c); +``` + +((4,1),(5,1)):((16,4),(64,4)) is effectively the same layout +as (4,5) : (16,64), because the 1s in the shape don't affect the layout +(as a mathematical function from one integer to one integer). +CuTe chooses not to simplify layout computations +with run-time values in them as much as it could, +because simplifications involving run-time values have a run-time cost. + +### Product + +CuTe includes four different kinds of layout products. + +1. `logical_product` + +2. `blocked_product` + +3. `raked_product` + +4. `tiled_product` + +`logical_product(A, B)` results in a layout where each element of layout B +has been replaced by a "copy" of layout A. +The other three products offer variations of this idea. + +#### Example: Tiled matrix + +Suppose that I want to make a matrix consisting of 3 x 4 tiles +in a row-major arrangement, +where each tile is a 2 x 2 column-major matrix. + +The Layout of each tile (tile) has Shape (2,2) and Stride (1,2). + +The Layout of the "matrix of tiles" (`matrix_of_tiles`) +has Shape (3,4) and Stride (4,1). + +##### Blocked product: the intuitive tiling + +If I were to deduce by hand what the layout of the tiled matrix should be, +it would look like this. + +| | (0,0) | (1,0) | (0,1) | (1,1) | (0,2) | (1,2) | (0,3) | (1,3) | +| --- | --- | --- | --- | --- | --- | --- | --- | --- | +| (0,0) | 0 | 2 | 4 | 6 | 8 | 10 | 12 | 14 | +| (1,0) | 1 | 3 | 5 | 7 | 9 | 11 | 13 | 15 | +| (0,1) | 16 | 18 | 20 | 22 | 24 | 26 | 28 | 30 | +| (1,1) | 17 | 19 | 21 | 23 | 25 | 27 | 29 | 31 | +| (0,2) | 32 | 34 | 36 | 38 | 40 | 42 | 44 | 46 | +| (1,2) | 33 | 35 | 37 | 39 | 41 | 43 | 45 | 47 | + +The row and column labels use the equivalence of 1-D logical coordinates and 2-D column-major coordinates. The left index in each pair is the row resp. 
column coordinate of the tile, while the right index in each pair is the row resp. column coordinate of the matrix-of-tiles. The resulting layout has Shape ((2, 3), (2, 4)), and Stride ((1, 16), (2, 4)), and the second mode can be coalesced. The Shape ((2, 3), (2, 4)) is hierarchical, but it is still rank-2 and can be drawn in 2D as above. Note how the row mode of the tile remains part of the row mode of the product, and the column mode of the tile remains a column mode of the product. + +The above layout is what `blocked_product(tile, matrix_of_tiles)` produces. +A critical use case for blocked product is "tiling" an "atom" +(some tile that relates to a hardware feature) over a matrix. + +```c++ +Layout tile = Layout, + Stride<_1,_2>>{}; +Layout matrix_of_tiles = Layout, + Stride<_4,_1>>{}; + +print_layout(blocked_product(tile, matrix_of_tiles)); +``` + +##### Logical product + +The logical product `logical_product(tile, matrix_of_tiles)` +results in Shape ((2, 2), (3, 4)) and Stride ((1, 2), (16, 4)). + +| | (0,0) | (1,0) | (2,0) | (0,1) | (1,1) | (2,1) | (0,2) | (1,2) | (2,2) | (0,3) | (1,3) | (2,3) | +| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | +| (0,0) | 0 | 16 | 32 | 4 | 20 | 36 | 8 | 24 | 40 | 12 | 28 | 44 | +| (1,0) | 1 | 17 | 33 | 5 | 21 | 37 | 9 | 25 | 41 | 13 | 29 | 45 | +| (0,1) | 2 | 18 | 34 | 6 | 22 | 38 | 10 | 26 | 42 | 14 | 30 | 46 | +| (1,1) | 3 | 19 | 35 | 7 | 23 | 39 | 11 | 27 | 43 | 15 | 31 | 47 | + +Note how the tile appears in the leftmost column and is reproduced +in each column in the same order as the matrix-of-tiles. That is, +the tile can be indexed through the first mode of the result and the +matrix-of-tiles can be indexed through the second mode. + +```c++ +Layout tile = Layout, + Stride<_1,_2>>{}; +Layout matrix_of_tiles = Layout, + Stride<_4,_1>>{}; + +print_layout(logical_product(tile, matrix_of_tiles)); +``` + +##### Raked product + +The raked product `raked_product(tile, matrix_of_tiles)` results in +Shape ((3, 2), (4, 2)) and Stride ((16, 1), (4, 2)). + +| | (0,0) | (1,0) | (2,0) | (3,0) | (0,1) | (1,1) | (2,1) | (3,1) | +| --- | --- | --- | --- | --- | --- | --- | --- | --- | +| (0,0) | 0 | 4 | 8 | 12 | 2 | 6 | 10 | 14 | +| (1,0) | 16 | 20 | 24 | 28 | 18 | 22 | 26 | 30 | +| (2,0) | 32 | 36 | 40 | 44 | 34 | 38 | 42 | 46 | +| (0,1) | 1 | 5 | 9 | 13 | 3 | 7 | 11 | 15 | +| (1,1) | 17 | 21 | 25 | 29 | 19 | 23 | 27 | 31 | +| (2,1) | 33 | 37 | 41 | 45 | 35 | 39 | 43 | 47 | + +The tile is now interleaved or "raked" with the other 3x4 matrix-of-tiles +instead of appearing as blocks. Other references call this a "cyclic +distribution." + +This might look familiar if you have ever used ScaLAPACK. +It expresses a 2-D block cyclic distribution of a 6 x 8 matrix +over 4 processes in a 2 x 2 "process grid." See +["The Two-dimensional Block-Cyclic Distribution"](https://netlib.org/scalapack/slug/node75.html#sec2dbcd) +and +["Local Storage Scheme and Block-Cyclic Mapping"](https://netlib.org/scalapack/slug/node76.html#seclocalstorage) +in the ScaLAPACK Users' Guide. + +In general, `logical_product` and these variations can produce any interleaving, +including blocked, cyclic, by-mode blocked/cyclic, and intermediate interleavings +that don't have common names. + +```c++ +Layout tile = Layout, + Stride<_1,_2>>{}; +Layout matrix_of_tiles = Layout, + Stride<_4,_1>>{}; + +print_layout(raked_product(tile, matrix_of_tiles)); +``` + +### Division + +The previous section covered layout products, +that reproduce one layout over another. 
+This section covers layout *division*. +Functions that divide a layout into components are useful +as a basis for tiling and partitioning layouts. + +For example, consider folding a vector into a matrix. +We could imagine an operation, called `logical_divide`, + +```c++ +Layout vec = Layout<_16,_3>{}; // 16 : 3 +Layout col = Layout< _4,_1>{}; // 4 : 1 +Layout mat = logical_divide(vec, col); // (4,4) : (3,12) +``` + +that "takes" the first 4 elements of the vector into the first mode +and leaves the "rest" in the second mode. This is a column-major matrix +view of the data in `vec`. +What if we want a row-major matrix view? + +```c++ +Layout vec = Layout<_16,_3>{}; // 16 : 3 +Layout col = Layout< _4,_4>{}; // 4 : 4 +Layout mat = logical_divide(vec, col); // (4,4) : (12,3) +``` + +Now, every fourth element of the vector is in the first mode and +the "rest" are in the second mode. +Multidimensional, hierarchical indices let us extend this operation +to any layout that "divides" the vector. + +```c++ +Layout vec = Layout<_16,_3>{}; // 16 : 3 +Layout col = Layout< _4,_2>{}; // 4 : 2 +Layout mat = logical_divide(vec, col); // (4,(2,2)) : (6,(3,24)) +``` + +```c++ +Layout vec = Layout<_16,_3>{}; // 16 : 3 +Layout col = Layout, + Stride<_4,_1>>{}; // (2,2) : (4,1) +Layout mat = logical_divide(vec, col); // ((2,2),(2,2)) : ((12,3),(6,24)) +``` + +All of the above examples produce a 4x4 matrix +that can be indexed and treated like a normal 4x4 matrix, +but each has a different underlying layout. +Thus, our algorithms can be written using logical coordinates, +without needing to address the detailed indexing that each layout requires. + +CuTe includes 3 different kinds of layout division operations. + +1. `logical_divide` + +2. `zipped_divide` + +3. `tiled_divide` + +We will summarize these in the sections that follow. + +#### Logical divide + +##### Example worked in detail + +This section will work the following logical divide example in detail. + +```c++ +Layout a = make_layout(24, 2); +Layout b = make_layout( 4, 2); +Layout c = logical_divide(a, b); +``` + +Logical divide produces a rank-2 `Layout`, +where mode 0 (the leftmost mode) corresponds to the divisor `b`, +and mode 1 (the rightmost mode) corresponds to the "remainder." +Intuitively, the remainder of 24 divided by 4 is 6, +so we know that mode 1 has 6 elements. +We just don't know its shape yet. + +CuTe defines `logical_divide(a, b)` as +`composition(a, make_layout(b, complement(b, size(a))))`. +Here, `size(a)` is 24. +What is `complement(b, 24)`? +Intuitively, it means "the remainder," +what's left over after applying `b` to 0, 1, 2, $\dots$, 23. + +The layout 4:2 means "take 4 elements at even-numbered indices." +The following table overlays the range of 4:2 +atop the complement's codomain 0, 1, $\dots$, 23. + +| Range of 4:2 | 0 | | 2 | | 4 | | 6 | | | | | | +| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | +| Codomain | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | $\dots$ | 23 | + +Layouts are linear, so their range must include zero. +The complement of 4:2 with respect to 24 is thus a layout whose range + +* includes zero; + +* does not include any other elements of the range of 4:2 + (i.e., satisfies the disjoint property; see above); and + +* includes as much of 0, 1, $\dots$, 23 as possible + (so that it forms the "remainder" of 4:2 with respect to 24). + +Intuitively, the range of the complement must look like this: +0, 1, 8, 9, 16, 17. +The resulting layout is ordered. 
+It has size 6 and cosize 18, +so it satisfies the bounded property (see above). +This is the layout (2, 3) : (1, 8). +(Going from this intuitive sense of the complement +to knowing how to compute it directly +is out of scope for this part of the tutorial.) + +The following table shows 4:2 with its complement (2, 3) : (1, 8). + +| Range of 4:2 | 0 | | 2 | | 4 | | 6 | | | | | | | | | | | | | | +| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | +| Codomain | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | $\dots$ | 23 | +| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | +| Range of complement | 0 | 1 | | | | | | | 8 | 9 | | | | | | | 16 | 17 | | | + +Now we know that `logical_divide`(24:2, 4:2) is +`composition`(24:2, `make_layout`(4:2, (2,3):(1,8))). +The composition of two layouts has the shape of the second (rightmost) layout, +so the resulting shape is (4, (2, 3)). +We see that the leftmost mode 4 corresponds to the divisor 4:2, +and the rightmost mode (2, 3) describes what's "left over" +from the original shape 24. + +What are the strides? +We can start from the leftmost mode. +4:2 takes every other element (the even-numbered elements) of 24:2. +That's a stride-2 thing, striding over a stride-2 thing. +The resulting stride is 4. +Similarly, the stride 2 of 24:2 +doubles the two strides of the rightmost mode. +The resulting layout is (4, (2, 3)) : (4, (2, 16)). + +##### Tiling example + +Suppose I have the 6 x 8 matrix from the Raked Product section +and want to "collect" the `tile`, turning the Raked Product into +the Blocked Product. + +To do this, we would like to gather two elements from the column +and leave the rest, then gather two elements from the row and leave the rest. +Thus, we want to apply `logical_divide` independently to the rows and cols +in order to retrieve the appropriate elements. + +In code, we copy the Layout from the result of the Raked Product section, then +specify the elements in the rows and cols we would like to gather. + +```c++ +Layout raked_prod = Layout,Shape <_4,_2>>, + Stride,Stride<_4,_2>>>{}; +Tile subtile = make_tile(Layout<_2,_3>{}, // Gather elements 2 : 3 from mode 0 + Layout<_2,_4>{}); // Gather elements 2 : 4 from mode 1 + +print_layout(logical_divide(raked_prod, subtile)); +``` + +Indeed, this does produce the result from the Blocked Product section. + +| | (0,0) | (1,0) | (0,1) | (1,1) | (0,2) | (1,2) | (0,3) | (1,3) | +| --- | --- | --- | --- | --- | --- | --- | --- | --- | +| (0,0) | 0 | 2 | 4 | 6 | 8 | 10 | 12 | 14 | +| (1,0) | 1 | 3 | 5 | 7 | 9 | 11 | 13 | 15 | +| (0,1) | 16 | 18 | 20 | 22 | 24 | 26 | 28 | 30 | +| (1,1) | 17 | 19 | 21 | 23 | 25 | 27 | 29 | 31 | +| (0,2) | 32 | 34 | 36 | 38 | 40 | 42 | 44 | 46 | +| (1,2) | 33 | 35 | 37 | 39 | 41 | 43 | 45 | 47 | + +Of course, any other rearrangement of the rows and cols is also valid. + +#### Zipped divide + +The `zipped_divide` function applies `logical_divide`, and then gathers the +"subtiles" into a single mode and the "rest" into a single mode. 
+ +For example, if we apply `zipped_divide` instead of `logical_divide` in the example above, + +```c++ +Layout raked_prod = Layout,Shape <_4,_2>>, + Stride,Stride<_4,_2>>>{}; +Tile subtile = make_tile(Layout<_2,_3>{}, // Gather elements 2 : 3 from mode 0 + Layout<_2,_4>{}); // Gather elements 2 : 4 from mode 1 + +print_layout(zipped_divide(raked_prod, subtile)); +``` + +then we get the result + +| | (0,0) | (1,0) | (2,0) | (0,1) | (1,1) | (2,1) | (0,2) | (1,2) | (2,2) | (0,3) | (1,3) | (2,3) | +| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | +| (0,0) | 0 | 16 | 32 | 4 | 20 | 36 | 8 | 24 | 40 | 12 | 28 | 44 | +| (1,0) | 1 | 17 | 33 | 5 | 21 | 37 | 9 | 25 | 41 | 13 | 29 | 45 | +| (0,1) | 2 | 18 | 34 | 6 | 22 | 38 | 10 | 26 | 42 | 14 | 30 | 46 | +| (1,1) | 3 | 19 | 35 | 7 | 23 | 39 | 11 | 27 | 43 | 15 | 31 | 47 | + +Note that this is the same layout as the result in the Logical Product section. +That is, the first mode is our original tile (and can be interpreted as a 2x2 matrix itself) +and the second mode is its logical layout within the raked layout. + +#### More Examples of Divide + +For brevity, shapes can be used with `logical_divide` and `tiled_divide` to quickly split and tile modes of a tensor. For example, this C++ code + +```c++ +Layout layout = Layout, + Stride< _1,_128,_0>>{}; +Shape tile_shape = make_shape(_4{},_8{}); +Layout logical_divided_tile = logical_divide(layout, tile_shape); +Layout zipped_divided_tile = zipped_divide(layout, tile_shape); + +print("layout : "); print(layout); print("\n"); +print("tile_shape : "); print(tile_shape); print("\n"); +print("logical_divided_tile : "); print(logical_divided_tile); print("\n"); +print("zipped_divided_tile : "); print(zipped_divided_tile); print("\n\n"); +``` + +produces the following output when we vary `layout`. + +```text +full_layout : (_12,_32,_6):(_1,_128,_0) +tile_shape : (_4,_8) +logical_divided_tile : ((_4,_3),(_8,_4),_6):((_1,_4),(_128,_1024),_0) +zipped_divided_tile : ((_4,_8),(_3,_4,_6)):((_1,_128),(_4,_1024,_0)) + +full_layout : (_12,(_4,_8),_6):(_1,(_32,_512),_0) +tile_shape : (_4,_8) +logical_divided_tile : ((_4,_3),((_4,_2),_4),_6):((_1,_4),((_32,_512),_1024),_0) +zipped_divided_tile : ((_4,(_4,_2)),(_3,_4,_6)):((_1,(_32,_512)),(_4,_1024,_0)) +``` + +This code + +```c++ +Layout layout = make_layout(Shape<_8,_8>{}, + Stride<_8,_1>{}); +Layout tile = make_tile(make_layout(Shape<_4>{}), + make_layout(Shape<_2>{})); +print("layout: "); +print_layout(layout); +print("\n"); +print("tile: "); +print(tile); +print("\n"); +print("logical_divide: "); +print_layout(logical_divide(layout, tile)); +print("zipped_divide: "); +print_layout(zipped_divide(layout, tile)); +``` + +results in the following layouts. + +

*(Figure: logical_divide-and-zipped_divide, the layouts produced by the code above.)*

+ +This code + +```c++ +Layout layout = make_layout(Shape<_8,_8>{}, + Stride<_8,_1>{}); +Layout tile = make_tile(make_layout(Shape<_2>{}), + make_layout(Shape<_4>{})); +print("layout: "); +print_layout(layout); +print("\n"); +print("tile: "); +print(tile); +print("\n"); +print("logical_divide: "); +print_layout(logical_divide(layout, tile)); +print("zipped_divide: "); +print_layout(zipped_divide(layout, tile)); +``` + +results in the following layouts. + +

*(Figure: logical_divide-and-zipped_divide-2, the layouts produced by the code above.)*

+ +#### Tiled divide + +The `tiled_divide` function works like `zipped_divide`, +except that it unpacks the second mode. This is useful when you have a `Tile` that describes all of the elements for a particular operation, for example, and want to gather those together but retain the logical shape of those tiles within the original layout. That is, + +```text +Layout Shape : (M, N, L, ...) +Tile Shape : +Tiled Result : ((M', N'), m, n, L, ...) +``` + +where `m` is `M / M'` and `n` is `N / N'`. +We can consider `m` as the "number of `Tile`s in `M`" and `n` as the "number of `Tile`s in `N`". This style of operation is common when applying MMA Atoms and Copy Atoms. diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/cute/03_tensor.md b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/cute/03_tensor.md new file mode 100644 index 0000000000000000000000000000000000000000..ccd25ae3ee44e56fe46b323525e42fdbfc9133f4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/cute/03_tensor.md @@ -0,0 +1,263 @@ +# CuTe Tensors + +## A Tensor is a multidimensional array + +CuTe's `Tensor` class represents a multidimensional array. +The array's elements can live in any kind of memory, +including global memory, shared memory, and register memory. + +### Array access + +Users access a `Tensor`'s elements in one of three ways: + +* `operator()`, taking as many integral arguments as the number of modes, + corresponding to the element's (possibly) multidimensional logical index; + +* `operator()`, taking a `Coord` (an `IntTuple` of the logical indices); or + +* `operator[]`, taking a `Coord` (an `IntTuple` of the logical indices). + +### Slices: Get a Tensor accessing a subset of elements + +Users can get a "slice" of a `Tensor`, +that is, a `Tensor` that accesses a subset of elements +of the original `Tensor`. + +Slices happen through the same `operator()` +that they use for accessing an individual element. +Passing in `_` (the underscore character, an instance of `Underscore`) +has the same effect as `:` (the colon character) in Fortran or Matlab: +the resulting slice accesses all indices in that mode of the `Tensor`. + +### Tensor's behavior determined by its Layout and Engine + +A `Tensor`'s behavior is entirely determined by its two components, +which correspond to its two template parameters: `Engine`, and `Layout`. + +For a description of `Layout`, +please refer to [the `Layout` section](./01_layout.md) +of this tutorial, or the [GEMM overview](./0x_gemm_tutorial.md). + +An `Engine` represents a one-dimensional array of elements. +When users perform array access on a `Tensor`, +the `Tensor` uses its `Layout` to map from a logical coordinate +to a one-dimensional index. +Then, the `Tensor` uses its `Engine` +to map the one-dimensional index +to a reference to the element. +You can see this in `Tensor`'s implementation of array access. + +```c++ +decltype(auto) operator[](Coord const& coord) { + return engine().begin()[layout()(coord)]; +} +``` + +One could summarize almost all CuTe use cases as follows: + +* create `Layout`s, + +* create `Tensor`s with those `Layout`s, and + +* invoke (either CuTe's, or custom) algorithms on those `Tensor`s. + +### Ownership of the elements + +`Tensor`s can be owning or nonowning. + +"Owning" `Tensor`s behave like `std::array`. +When you copy the `Tensor`, you (deep-)copy its elements, +and the `Tensor`'s destructor deallocates the array of elements. 
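For instance, here is a minimal sketch of owning-`Tensor` copy semantics; it uses the register-memory `make_tensor<T>` form described later in this document:

```c++
auto a = make_tensor<float>(make_shape(Int<4>{}, Int<8>{})); // owning, register-backed
auto b = a;      // copying the Tensor deep-copies all 32 elements, like std::array
b(0,0) = 1.0f;   // modifies b only; a(0,0) is unchanged
```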
+ +"Nonowning" `Tensor`'s behave like a (raw) pointer to the elements. +Copying the `Tensor` doesn't copy the elements, +and destroying the `Tensor` doesn't deallocate the array of elements. + +Whether a `Tensor` is "owning" or "nonowning" depends entirely on its `Engine`. +This has implications for developers of generic `Tensor` algorithms. +For example, input `Tensor` parameters of a function +should be passed by const reference, +because passing the `Tensor`s by value +might make a deep copy of the `Tensor`'s elements. +It might also *not* make a deep copy of the elements; +there's no way to know without specializing the algorithm +on the `Tensor`'s `Engine` type. +Similarly, output or input/output `Tensor` parameters of a function +should be passed by (nonconst) reference. +Returning a `Tensor` might (or might not) +make a deep copy of the elements. + +The various overloads of the `copy_if` algorithm in +[`include/cute/algorithm/copy.hpp`](../../../include/cute/algorithm/copy.hpp) +take their `src` (input, source of the copy) parameter +as `Tensor& const`, +and take their `dst` (output, destination of the copy) parameter +as `Tensor&`. +Additionally, there are overloads for mutable temporaries like +`Tensor&&` +so that these algorithms can be applied directly to slices, +as in the following example. + +```c++ +copy(src_tensor(_,3), dst_tensor(2,_)); +``` + +In C++ terms, each of the expressions +`src_tensor(_,3)`, and `dst_tensor(2,_)` +is in the "prvalue" +[value category](https://en.cppreference.com/w/cpp/language/value_category), +because it is a function call expression +whose return type is nonreference. +(In this case, calling `Tensor::operator()` +with at least one `_` (`Underscore`) argument +returns a `Tensor`.) +The prvalue `dst_tensor(2,_)` won't match +the `copy` overload taking +`Tensor&`, +because prvalues can't be bound to +nonconst lvalue references (single `&`). +However, it will match the `copy` overload taking +`Tensor&&` +(note the two `&&` instead of one `&`). +Calling the latter overload binds the reference +to the prvalue `dst_tensor(2,_)`. +This results in +[creation of a temporary](https://en.cppreference.com/w/cpp/language/implicit_conversion#Temporary_materialization) +`Tensor` result to be passed into `copy`. + +### CuTe's provided `Engine` types + +CuTe comes with a few `Engine` types. +Here are the three that new users are most likely to encounter first. + +* `ArrayEngine`: an owning `Engine`, + representing an array of `N` elements of type `T` + +* `ViewEngine`: a nonowning `Engine`, + where `Iterator` is a random access iterator + (either a pointer to an array, or something that acts like one) + +* `ConstViewEngine`: a nonowning `Engine`, + which is the view-of-const-elements version of `ViewEngine` + +### "Tags" for different kinds of memory + +`ViewEngine` and `ConstViewEngine` wrap pointers to various kinds of memory. +Users can "tag" the memory with its space -- e.g., global or shared -- +by calling `make_gmem_ptr(g)` when `g` is a pointer to global memory, +or `make_smem_ptr(s)` when `s` is a pointer to shared memory. + +Tagging memory makes it possible for CuTe's `Tensor` algorithms +to use the fastest implementation for the specific kind of memory. +It also avoids incorrect memory access. +For example, some kinds of optimized copy operations require +that the source of the copy be in global memory, +and the destination of the copy be in shared memory. 
+Tagging makes it possible for CuTe to dispatch +to those optimized copy operations where possible. +CuTe does this by specializing `Tensor` algorithms +on the `Tensor`'s `Engine` type. + +### Engine members + +In order for a type to be valid for use as an `Engine`, +it must have the following public members. + +```c++ +using value_type = /* ... the value type ... */; +using iterator = /* ... the iterator type ... */; +iterator begin() /* sometimes const */; +``` + +## Constructing a Tensor + +### Nonowning view of existing memory + +A `Tensor` can be a nonowning view of existing memory. +For this use case, users can create the `Tensor` by calling `make_tensor` +with two arguments: a wrapped pointer to the memory to view, and the `Layout`. +Users wrap the pointer by identifying its memory space: +e.g., global memory (via `make_gmem_ptr`) or shared memory (via `make_smem_ptr`). +`Tensor`s that view existing memory can have either static or dynamic `Layout`s. + +Here are some examples of creating `Tensor`s +that are nonowning views of existing memory. + +```c++ +// Global memory (static or dynamic layouts) +Tensor gmem_8s = make_tensor(make_gmem_ptr(A), Int<8>{}); +Tensor gmem_8d = make_tensor(make_gmem_ptr(A), 8); +Tensor gmem_8sx16d = make_tensor(make_gmem_ptr(A), make_shape(Int<8>{},16)); +Tensor gmem_8dx16s = make_tensor(make_gmem_ptr(A), make_shape ( 8 ,Int<16>{}), + make_stride(Int<16>{},Int< 1>{})); + +// Shared memory (static or dynamic layouts) +Shape smem_shape = make_shape(Int<4>{},Int<8>{}); +__shared__ T smem[decltype(size(smem_shape))::value]; // (static-only allocation) +Tensor smem_4x8_col = make_tensor(make_smem_ptr(&smem[0]), smem_shape); +Tensor smem_4x8_row = make_tensor(make_smem_ptr(&smem[0]), smem_shape, GenRowMajor{}); +``` + +### Owning array of register memory + +A `Tensor` can also be an owning array of register memory. +For this use case, users can create the `Tensor` +by calling `make_tensor(layout)`, +where `T` is the type of each element of the array, +and `layout` is the `Tensor`'s `Layout`. +Owning `Tensor`s must have a static `Layout`, +as CuTe does not perform dynamic memory allocation in `Tensor`s. + +Here are some examples of creating owning `Tensor`s. + +```c++ +// Register memory (static layouts only) +Tensor rmem_4x8_col = make_tensor(make_shape(Int<4>{},Int<8>{})); +Tensor rmem_4x8_row = make_tensor(make_shape(Int<4>{},Int<8>{}), GenRowMajor{}); +Tensor rmem_4x8_mix = make_tensor(make_shape (Int<4>{},Int< 8>{}), + make_stride(Int<2>{},Int<32>{})); +Tensor rmem_8 = make_fragment_like(gmem_8sx16d(_,0)); +``` + +The `make_fragment_like` function makes an owning Tensor of register memory, +with the same shape as its input `Tensor` argument. + +## Tensor use examples + +### Copy rows of a matrix from global memory to registers + +The following example copies rows of a matrix (with any `Layout`) +from global memory to register memory, +then executes some algorithm `do_something` +on the row that lives in register memory. + +```c++ +Tensor gmem = make_tensor(make_gmem_ptr(A), make_shape(Int<8>{}, 16)); +Tensor rmem = make_fragment_like(gmem(_, 0)); +for (int j = 0; j < size<1>(gmem); ++j) { + copy(gmem(_, j), rmem); + do_something(rmem); +} +``` + +This code does not need to know anything the `Layout` of `gmem` +other than that it is rank-2 and that the first mode is a compile-time value. +The following code checks both of those conditions at compile time. 
+ +```c++ +CUTE_STATIC_ASSERT_V(rank(gmem) == Int<2>{}); +CUTE_STATIC_ASSERT_V(is_static(gmem))>{}); +``` + +A `Tensor` encapsulates the data type, data location, +and possibly also the shape and stride of the tensor at compile time. +As a result, `copy` can dispatch, based on the types and Layouts of its arguments, +to use any of various synchronous or asynchronous hardware copy instructions +and can auto-vectorize the copy instructions in many cases as well. +CuTe's `copy` algorithm lives in +[`include/cute/algorithm/copy.hpp`](../../../include/cute/algorithm/copy.hpp). +For more details on the algorithms that CuTe provides, +please refer to [the algorithms section](./04_algorithms.md) +of the tutorial, or the +[CuTe overview in the GEMM tutorial](./0x_gemm_tutorial.md). + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/cute/04_algorithms.md b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/cute/04_algorithms.md new file mode 100644 index 0000000000000000000000000000000000000000..e35b75612dfc28b82e1da5076f5ce2e97e188592 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/cute/04_algorithms.md @@ -0,0 +1,223 @@ +# CuTe Tensor algorithms + +This section summarizes the interfaces and implementations +of common numerical algorithms performed on `Tensor`s. + +The implementation of these algorithms may be found in the +[include/cute/algorithm/](../../../include/cute/algorithm/) +directory. + +## `copy` + +CuTe's `copy` algorithm copies the elements of a source `Tensor` +into the elements of a destination `Tensor`. +The various overloads of `copy` can be found in +[`include/cute/algorithm/copy.hpp`](../../../include/cute/algorithm/copy.hpp). + +### Interface and specialization opportunities + +A `Tensor` encapsulates the data type, data location, +and possibly also the shape and stride of the tensor at compile time. +As a result, `copy` can and does dispatch, +based on the types of its arguments, +to use any of various synchronous or asynchronous hardware copy instructions. + +The `copy` algorithm has two main overloads. +The first just takes the source `Tensor` and the destination `Tensor`. + +```c++ +template +CUTE_HOST_DEVICE +void +copy(Tensor const& src, + Tensor & dst); +``` + +The second takes those two parameters, plus a `Copy_Atom`. + +```c++ +template +CUTE_HOST_DEVICE +void +copy(Copy_Atom const& copy_atom, + Tensor const& src, + Tensor & dst); +``` + +The two-parameter `copy` overload picks a default implementation +based only on the types of the two `Tensor` parameters. +The `Copy_Atom` overload lets callers override that default +by specifying a nondefault `copy` implementation. + +### Parallelism and synchronization depend on parameter types + +Either the default implementation or +the implementation selected by a `Copy_Atom` overload +may use none or all available parallelism, +and may have a variety of synchronization semantics. +The behavior depends on `copy`'s parameter types. +Users are expected to figure this out based on their knowledge +of the architecture on which they are running. +(Developers often write a custom optimized kernel +for each GPU architecture.) + +The `copy` algorithm may be sequential per thread, +or it may be parallel across some collection of threads +(e.g., a block or cluster). 
+ +If `copy` is parallel, +then the collection of participating threads +may need synchronization before any thread in the collection +may assume that the copy operation has completed. +For example, if the participating threads form a thread block, +then users must invoke `__syncthreads()` +or the Cooperative Groups equivalent +before they may use the results of `copy`. + +The `copy` algorithm may use asynchronous copy instructions, +such as `cp.async`, or its C++ interface `memcpy_async`. +In that case, users will need to perform +the additional synchronization appropriate to that underlying implementation +before they may use the results of the `copy` algorithm. +[The CuTe GEMM tutorial example](../../../examples/cute/tutorial/sgemm_nt_1.cu) +shows one such synchronization method. +More optimized GEMM implementations use pipelining techniques +to overlap asynchronous `copy` operations with other useful work. + +### A generic copy implementation + +A simple example of a generic `copy` implementation +for any two `Tensor`s looks like this. + +```c++ +template +CUTE_HOST_DEVICE +void +copy(Tensor const& src, // Any logical shape + Tensor & dst) // Any logical shape +{ + for (int i = 0; i < size(src); ++i) { + dst(i) = src(i); + } +} +``` + +This generic `copy` algorithm addresses both `Tensor`s +with 1-D logical coordinates, thus traversing both `Tensor`s +in a logical column-major order. +Some reasonable architecture-independent optimizations +would include the following. + +1. If the two `Tensor`s have known memory spaces with optimized + access instructions (like `cp.async`), then dispatch to the + custom instruction. + +2. The the two `Tensor`s have static layouts and it can be proven + that element vectorization is valid -- for example, four `LDS.32`s + can be combined into a single `LDS.128` -- then vectorize the source + and destinations tensors. + +3. If possible, validate that the copy instruction to be used is + appropriate for the source and destination tensors. + +CuTe's optimized copy implementations can do all of these. + +## `copy_if` + +CuTe's `copy_if` algorithm lives in the same header as `copy`, +[`include/cute/algorithm/copy.hpp`](../../../include/cute/algorithm/copy.hpp). +The algorithm takes source and destination `Tensor` parameters like `copy`, +but it also takes a "predication `Tensor`" +with the same shape as the input and output. +Elements of the source `Tensor` are only copied +if the corresponding predication `Tensor` element is nonzero. + +For details on why and how to use `copy_if`, +please refer to the +["predication" section of the tutorial](./0y_predication.md). + +## `gemm` + +### What `gemm` computes + +The `gemm` algorithm takes three `Tensor`s, A, B, and C. +What it does depends on the number of modes +that its `Tensor` parameters have. +We express these modes using letters. + +* V indicates a "vector," a mode of independent elements. + +* M and N indicate the number of rows resp. columns + of the matrix result C of the BLAS's GEMM routine. + +* K indicates the "reduction mode" of GEMM, + that is, the mode along which GEMM sums. + Please see the [GEMM tutorial](./0x_gemm_tutorial.md) for details. + +We list the modes of the input `Tensor`s A and B, +and the output `Tensor` C, +using a notation `(...) x (...) => (...)`. +The two leftmost `(...)` describe A and B (in that order), +and the `(...)` to the right of the `=>` describes C. + +1. `(V) x (V) => (V)`. The element-wise product of vectors: Cv += Av Bv. Dispatches to FMA or MMA. + +2. 
`(M) x (N) => (M,N)`. The outer product of vectors: Cmn += Am B_n. Dispatches to (4) with V=1. + +3. `(M,K) x (N,K) => (M,N)`. The product of matrices: Cmn += Amk Bnk. Dispatches to (2) for each K. + +4. `(V,M) x (V,N) => (V,M,N)`. The batched outer product of vectors: Cvmn += Avm Bvn. Optimizes for register reuse and dispatches to (1) for each M, N. + +5. `(V,M,K) x (V,N,K) => (V,M,N)`. The batched product of matrices: Cvmn += Avmk Bvnk. Dispatches to (4) for each K. + +Please refer to the [GEMM tutorial](./0x_gemm_tutorial.md) +for an overview of CuTe's convention for ordering the modes. +For example, if K appears, it always appears rightmost ("outermost"). +If V appears, it always appears leftmost ("innermost"). + +### Dispatch to optimized implementations + +Just like with `copy`, CuTe's implementations of `gemm` +uses its `Tensor` arguments' types to dispatch +to an appropriately optimized implementation. +Also like `copy`, `gemm` takes an optional `MMA_Atom` parameter +that lets callers override the default `FMA` instruction +that CuTe would select based on the `Tensor` arguments' types. + +For more information on `MMA_Atom` and on specialization of `gemm` +for different architectures, please refer to the +[MMA section of the tutorial](./0t_mma_atom.md). + +## `axpby` + +The `axpby` algorithm lives in the header file +[`include/cute/algorithm/axpby.hpp`](../../../include/cute/algorithm/axpby.hpp). +It assigns to $y$ the result of $\alpha x + \beta y$, +where $\alpha$ and $\beta$ are scalars and $x$ and $y$ are `Tensor`s. +The name stands for "Alpha times X Plus Beta times Y," +and is a generalization of the original BLAS "AXPY" routine +("Alpha times X Plus Y"). + +## `fill` + +The `fill` algorithm lives in the header file +[`include/cute/algorithm/fill.hpp`](../../../include/cute/algorithm/fill.hpp). +It overwrites the elements of its `Tensor` output argument +with a given scalar value. + +## `clear` + +The `clear` algorithm lives in the header file +[`include/cute/algorithm/clear.hpp`](../../../include/cute/algorithm/clear.hpp). +It overwrites the elements of its `Tensor` output argument with zeros. + +## Other algorithms + +CuTe provides other algorithms. +Their header files can be found in the +[`include/cute/algorithm`](../../../include/cute/algorithm) +directory. diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/cute/0t_mma_atom.md b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/cute/0t_mma_atom.md new file mode 100644 index 0000000000000000000000000000000000000000..f1880464be317583d7326576324d5f07fef7a9af --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/cute/0t_mma_atom.md @@ -0,0 +1,434 @@ +# CuTe's support for Matrix Multiply-Accumulate instructions + +In this file, we explain in detail how we support our GPUs' +Matrix Multiply-Accumulate (MMA) hardware instructions in CuTe. + +MMAs are architecture-specific. +Different generations of GPU architectures +introduce different sets of MMA instructions. +However, CuTe features such as `Layout` +makes it possible to expose MMAs for use in generic CUDA C++ code. +We do this in two steps. + +1. We wrap each MMA's PTX instruction in an "Operation" struct. + +2. For each Operation struct, we define a "Traits" struct + that defines all of the meta-information needed to use the Operation. 
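As a minimal sketch of how these two pieces fit together (this example is illustrative; the header paths and the `make_tiled_mma` defaults shown here are assumptions that may differ between CUTLASS versions):

```c++
#include <cute/atom/mma_atom.hpp>         // MMA_Atom, make_tiled_mma
#include <cute/atom/mma_traits_sm70.hpp>  // MMA_Traits for the SM70 Operations
using namespace cute;

using Op   = SM70_8x8x4_F32F16F16F32_NT;  // Operation struct wrapping the PTX instruction
using Atom = MMA_Atom<Op>;                // Atom = Operation plus its MMA_Traits meta-information

// The Atom can then be tiled over threads and values to build a larger MMA.
auto tiled_mma = make_tiled_mma(Atom{});
```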
+ +## CuTe MMA Atoms + +CuTe exposes each MMA to generic CUDA C++ code as a pair of structs: +an "Operation" struct, +and an `MMA_Traits` struct templated on the Operation struct type. + +An "Operation" struct exposes the PTX instruction +for that specific operation. +It defines the arguments and interface it expects. +Operation structs have minimal software dependencies -- +they do not use layouts, tensors, or non-standard numeric data types. +Different structs have different names +that describe what the MMA instruction does. +We will explain the naming scheme below. + +A corresponding `MMA_Traits` struct specialization +defines meta-information about the Operation, +such as the compute types, the logical shape of the operation, +and the `Layout`s of threads and values within the operation. +The `MMA_Traits` struct takes the Operation as a template parameter. +CuTe specializes `MMA_Traits` for each Operation type that it supports. + +Together, these two types comprise an "Atom" that decouples the complexity of thread and data layouts from the call site of of the PTX instruction. The Atom's Traits struct exposes information that is relevant to a single MMA operation, no matter the granularity at which it operates. + +CuTe MMA atoms expose the semantics of a single MMA operation. +This is true regardless of the hardware level at which the MMA operates. +CuTe supports MMA atoms that operate at a variety of hardware levels, +including + +* a single thread (e.g., fused multiply-add (FMA) instruction); + +* a quadpair (Volta); + +* a single warp (Ampere); and + +* a warpgroup (Hopper). + +### Operation structs + +#### Location of files + +CuTe provides its Operations structs in the +[`include/cute/arch`](../../../include/cute/arch) +directory, in header files starting with `mma`. + +#### Operation struct's name + +A CuTe Operation struct's name encodes information about + +* its first supported architecture, + +* the M, N, and K dimensions that it accepts, + +* the types that it takes, and + +* the expected A and B layouts. + +For example, the Volta section below will refer to the +`SM70_8x8x4_F32F16F16F32_NT` Operation struct defined in +[`include/cute/arch/mma_sm70.hpp`](../../../include/cute/arch/mma_sm70.hpp). + +* "SM70" refers to Volta. + +* "8x8x4" refers to M = 8, N = 8, and K = 4, + the dimensions of the MMA operation that the quadpair performs + (see below). + +* "F32F16F16F32" refers to the element types + of the four matrix operands A, B, C, and D. + An MMA computes D = C + A * B, + so we read the types from left to right: + D is F32 (`float`), A is F16 (half), + B is F16 (half), and C is F32 (`float`). + +* "NT" means that A is M-major (not transposed) + and B is N-major (transposed). + +#### Contents + +An Operation struct has the following members. + +##### Type aliases + +An Operation struct has four public type aliases: +`DRegisters`, `ARegisters`, `BRegisters`, and `CRegisters`. +For example, the `SM70_8x8x4_F32F16F16F32_NT` Operation struct defined in +[`include/cute/arch/mma_sm70.hpp`](../../../include/cute/arch/mma_sm70.hpp) +defines these as follows. + +```c++ +using DRegisters = float[8]; +using ARegisters = uint32_t[2]; +using BRegisters = uint32_t[2]; +using CRegisters = float[8]; +``` + +This shows how many values each thread will pass into the PTX instruction +for each of the matrices A, B, C, and D. 
For this Operation, +each thread passes 8 F32 values each for C and D (hence `float[8]`), +and 4 F16 values each for A and B (hence `uint32_t[2]`; +the instruction packs two 16-bit F16 values +in each of the two 32-bit `uint32_t` values). + +##### `fma` static member device function + +An operation struct defines a public `static void fma` function. +It is marked with the `CUTE_HOST_DEVICE` macro, +which adds the `__host__ __device__` annotations. +Different Operations define `fma` to take different numbers of arguments, +depending on the PTX MMA instruction. +The implementation protects use of the PTX instruction with a macro, +and raises an `assert` if `fma` is called when the macro is not defined. +This ensures that tests and examples that use this Operation in an Atom +can still compile, even if the PTX instruction is not available. + +### Traits + +#### Location of files + +CuTe provides its Traits structs in the +[`include/cute/atom`](../../../include/cute/atom) +directory, in header files starting with `mma_traits`. + +#### Contents + +An `MMA_Traits` specialization defines the following public type aliases. + +* `ElementDVal`: Compute type of the D matrix + +* `ElementAVal`: Compute type of the A matrix + +* `ElementBVal`: Compute type of the B matrix + +* `ElementCVal`: Compute type of the C matrix + +* `Shape_MNK`: Logical MxNxK shape of the MMA operation + +* `ThrID`: Logical thread mapping within the single MMA operation + (specifying the quadpair, warp, or warpgroup view) + +* `ALayout`: Mapping of (thread,value) pairs to the logical MxK A matrix + +* `BLayout`: Mapping of (thread,value) pairs to the logical NxK B matrix + +* `CLayout`: Mapping of (thread,value) pairs to the logical MxN C matrix + +#### Example + +The specialization of MMA_Traits for the +`SM70_8x8x4_F32F16F16F32_NT` Operation lives in the header file +[`include/cute/atom/mma_traits_sm70.hpp`](../../../include/cute/atom/mma_traits_sm70.hpp). +It looks like this. + +```c++ +template <> +struct MMA_Traits +{ + using ElementDVal = float; + using ElementAVal = half_t; + using ElementBVal = half_t; + using ElementCVal = float; + + using Shape_MNK = Shape<_8,_8,_4>; + using ThrID = SM70_QuadPair; + using ALayout = SM70_8x4_Col; + using BLayout = SM70_8x4_Col; + using CLayout = SM70_8x8_32b; +}; +``` + +The next section will explain these type aliases in detail. + +## Volta + +This and the following sections show examples of how to construct MMA atoms. +We don't try to explain this for all GPU architectures and MMAs. +Instead, we use selected examples to illustrate the process +of developing new atoms. + +Volta architecture implements an HMMA instruction where a group of 8 threads called a quadpair (QP) collaborate to share data and perform an 8x8x4 (fp32 or fp16) matrix multiply-accumulate. (since a warp is 32 threads wide, it would perform an MMA across 4 QPs for a tile size of 16x16x4). + +We first take a look at how we would take the ISA semantics of thread and data partitioning for the HMMA instruction, and encode it in a Traits struct. The HMMA NT instruction has the thread-data layout: + +

[Figure: HMMA.8x8x4.NT.png]

### Types

The HMMA NT above uses types:

```cpp
  using ElementDVal = float;
  using ElementAVal = half_t;
  using ElementBVal = half_t;
  using ElementCVal = float;
```

The rest of the `MMA_Traits` will be described in units of these types.

### Shape

The HMMA NT above has shape 8x8x4:

```cpp
  // Logical shape of the MMA
  using Shape_MNK = Shape <_8,_8,_4>;
```

### Thread ID

If the 32 threads in a warp are logically indexed by [0 ... 31], then the above image contains threads [0,1,2,3]U[16,17,18,19]. These threads make up the 0th quadpair. We can write a thread mapping that maps the eight logical thread ids [0,1,2,3,4,5,6,7] of the MMA to the quadpair thread indices [0,1,2,3]U[16,17,18,19] of a warp. The layout function has 4 elements with a stride of 1 and 2 of those with a stride of 16. With this, we write a layout that represents a quadpair:

```cpp
  // Mapping from (logical thread id) -> (thread idx)
  using ThrID = Layout<Shape <_4,_2>,
                       Stride<_1,_16>>;
```

Again, this layout function maps the logical thread id [0,8) of the MMA operation onto the quadpair thread index [0,4)U[16,20) of a warp.

### Accumulator Mapping

Let us look at exactly how the 8 threads within a QP are mapped to the A, B, and C matrices. For the C and D matrices, the above image is broken down a bit more below. On the left is shown the whole QP-level view, and on the right are shown the values owned by just thread 0.

[Figure: HMMA.8x8x4.quadpair.C.png]

The metainformation of this single-instruction-level view is what we want to encode in CuTe. Specifically, the QP-level view in this diagram corresponds to the four MMA traits for [SM70_F32F16F16F32](../../../include/cute/arch/mma_sm70.hpp). These structs contain the `Element` types, the `Shape_MNK`, and the `ThrID` mapping we constructed above. Now, let us take a look at the definition of `CLayout`, the thread-data layout of accumulators. The job of `CLayout` is to construct a mapping between the `(logical_thr_id, logical_val_id)` pairs and the `(m, n)` coordinates in the C matrix, which can then be used to build up more complicated layouts and operations like the 16x16x4 WMMA.

We can start constructing a `CLayout` from the picture above. As with any CuTe layout, it is a pair of a `Shape` and a corresponding `Stride`. Let us just look at the shape for now. We know that the HMMA uses 8 threads, each of which owns 8 values. Therefore, the shape of our mapping must have a size of 8 along two modes. With this, we have

```cpp
  // (T8,V8) -> (m,n)
  using CLayout = Layout<Shape <_8,_8>,
                         Stride<_?,_?>>;   // Stride to be filled in below
```

This is not to be confused with the logical 8x8 shape of the C matrix. This is 8 threads by 8 values. We now want to map those to (m,n) coordinates. Since CuTe layouts return indices rather than coordinates, we choose a column-major encoding of the (m,n) coordinates:

```
(logical_thr_id, logical_val_id) -> (m, n) == m + n * M
```

With this in place, we can start thinking about how to construct the strides in `CLayout`. Let's begin by looking at the strides between threads. Note that

* `(T0,V0)` is located at `(m,n) = (0,0) = 0`
* `(T1,V0)` is located at `(m,n) = (1,0) = 1`
* `(T2,V0)` is located at `(m,n) = (0,2) = 16`
* `(T3,V0)` is located at `(m,n) = (1,2) = 17`
* `(T4,V0)` is located at `(m,n) = (4,0) = 4`
* `(T5,V0)` is located at `(m,n) = (5,0) = 5`
* `(T6,V0)` is located at `(m,n) = (4,2) = 20`
* `(T7,V0)` is located at `(m,n) = (5,2) = 21`

where `T4`,`T5`,`T6`,`T7` are the 4th, 5th, 6th, and 7th logical thread ids of the MMA, corresponding to thread indices 16, 17, 18, and 19 of the warp (recorded in the `ThrID` mapping!).

We note that the pattern can be transcribed to a layout. We can find the position of the 8 threads via

```cpp
  using CLayout = Layout<Shape <Shape <_2,_2,_2>, _8>,
                         Stride<Stride<_1,_16,_4>, _?>>;
```

With the exact same approach, we can construct the stride along the `logical value id` mode.

* `(T0,V0)` is located at `(m,n) = (0,0) = 0`
* `(T0,V1)` is located at `(m,n) = (0,1) = 8`
* `(T0,V2)` is located at `(m,n) = (2,0) = 2`
* `(T0,V3)` is located at `(m,n) = (2,1) = 10`
* `(T0,V4)` is located at `(m,n) = (0,4) = 32`
* `(T0,V5)` is located at `(m,n) = (0,5) = 40`
* `(T0,V6)` is located at `(m,n) = (2,4) = 34`
* `(T0,V7)` is located at `(m,n) = (2,5) = 42`

We note that this pattern can also be transcribed to a layout. We can find the position of the 8 values via

```cpp
  // (T8,V8) -> (m,n)
  using CLayout = Layout<Shape <Shape <_2,_2,_2>, Shape <_2,_2,_2>>,
                         Stride<Stride<_1,_16,_4>, Stride<_8,_2,_32>>>;
```

And that's all! We can verify that each `(tid,vid)` coordinate in this layout is reliably mapped to the correct (encoded) `(m,n)` coordinate.

In the case of F16 accumulators, the layout is much less complex. Each row of accumulators `(m, :)` is held by a single thread, which makes the layout:

```cpp
  using CLayout = Layout<Shape <_8,_8>,
                         Stride<_1,_8>>;
```

### A and B Layout Mapping

A and B matrix layouts depend on whether the sources are transposed or not.
The diagram below shows the thread ID to data ownership map for A and B matrices in the case of NT and TN transposes. + +

[Figure: HMMA.8x8x4.quadpair.AB.png]

Let's look at the TN layout for the A matrix first (right side of the diagram). Again, there are the same 8 logical threads, but each thread owns only 4 elements this time. The shape of `ALayout` will then be `Shape<_8,_4>`. As for the strides, we again need a similar mapping `(m, k) == m + k * M`. Looking down the `M` mode, we go from `(T0, V0)` to `(T1, V0)`, which is a stride of 1 for all 8 threads. For the `K` mode, as we go across, we go from `(T0, V0)` to `(T0, V1)`, which makes a stride of 8 for all 4 values. Therefore, the A layout is:

```cpp
  // (T8,V4) -> (m,k)
  using ALayout = Layout<Shape <_8,_4>,
                         Stride<_1,_8>>;
```

The source B layout is constructed similarly for the TN HMMA, except that we want to write it as `(N,K)` rather than `(K,N)` for convenience. For the strides, as we go across the `N` mode, we go from `(T0, V0)` to `(T1, V0)`, making this a stride of 1 for all 8 threads. As we go down the `K` mode, we go from `(T0, V0)` to `(T0, V1)`, which is a stride of 8 for all 4 values. So the B layout is the same as A:

```cpp
  // (T8,V4) -> (n,k)
  using BLayout = Layout<Shape <_8,_4>,
                         Stride<_1,_8>>;
```

The layouts in the case of NT are a bit more complicated (left side of the diagram). Going down the `M` mode of `A`, we see the four values of `T0` first and then we see the four values of `T4`. This means we first have a stride of 1 for 4 values, followed by a stride of 4 from `T0` to `T4`. So we have two sub-strides along the `M` mode. For the `K` mode, as we go across, we simply increment the `thr_id`, keeping `val_id` the same, making the stride 8 for 4 threads. This makes the A layout:

```cpp
  // (T8,V4) -> (m,k)
  using ALayout = Layout<Shape <Shape <_4,_2>,_4>,
                         Stride<Stride<_8,_4>,_1>>;
```

With the `(N,K)` ordering for B, the layout is the same.

```cpp
  // (T8,V4) -> (n,k)
  using BLayout = Layout<Shape <Shape <_4,_2>,_4>,
                         Stride<Stride<_8,_4>,_1>>;
```

The NN and TT transposes are simply combinations of the two layouts we have seen for A and B so far.

## Hopper

Now, we are ready to take a look at the much larger GMMA operation (Group MMA) first introduced with the Hopper architecture. These MMA instructions operate at the granularity of 128 threads (4 warps), which are collectively referred to as a warpgroup.

### Thread ID

In the case of Hopper GMMAs, the thread IDs are assigned using a simple 1-D contiguous layout, which makes `ThrID` trivial:

```cpp
using ThrID = Layout<_128, _1>;
```

### Accumulator Mapping

Accumulators are mapped hierarchically in GMMA, starting from the concept of a core matrix and building up to a layout for the whole C matrix tile. Let's look at this core matrix first. We only consider fp16 accumulators here, but the extension to fp32 accumulators is trivial, as we will see later.

Each core matrix has the layout shown in the diagram below.

[Figure: gmma_coremat_cd_fp16.png]

+ +As in the Volta examples, the thread IDs are logical only, and which of the four warps they belong to in the warpgroup is not important. + +Then GMMA tiles this core matrix first vertically along the M mode, and then repeats that column of core matrices along the N mode to construct the full MxN tile. This tiling is shown in the image below. + +

[Figure: gmma_wg_n_slice.png]

+ +With this image, we are again ready to start building the `CLayout` for `SM90_64x128x16_F16F16F16F16_TN` atom. Same as before, we are constructing a mapping between the `(logical_thr_id, logical_val_id) -> (m, n)` coordinate spaces. + +To begin, let's follow the first few threads and values. We immediately see that they are arranged along the `N`-mode with pairs of values and four threads. This gives us + +```cpp +// (T128,V4) -> (M64,N8) +using CLayout = Layout, Shape < _2, ...>>, + Stride, Stride<_64, ...>>>; +``` + +To complete the first 8x8 core matrix, the four threads repeat eight times down the `M`-mode: + +```cpp +// (T128,V4) -> (M64,N8) +using CLayout = Layout, Shape < _2, ...>>, + Stride, Stride<_64, ...>>>; +``` + +Then, as we go to the next core matrix, we wrap back again to `T0`, but this time to `(T0, V2)`. + +```cpp +// (T128,V4) -> (M64,N8) +using CLayout = Layout, Shape < _2, _2>>, + Stride, Stride<_64, _8>>>; +``` + +Finally, we get this entire pattern repeating four times, once for each warp, down the `M`-mode starting at `(m,n) = (16,0) = 16`. where two core matrices that belong to the same warp are stacked on top of each other. This makes the size of the final sub-mode of M 4. As for the stride, this time we go to `(T32, V0)`, which makes it a stride of 32. + +```cpp +// (T128,V4) -> (M64,N8) +using CLayout = Layout, Shape < _2, _2>>, + Stride, Stride<_64, _8>>>; +``` + +This is the full `CLayout` for 64x8 accumulators. The GMMA instructions include 64xN variants with `N = [16,32,64,128,256]` where this 64x8 pattern is repeated giving each thread additional values. As this starts at `(m,n) = (0,8) = 512`, this is easy to account for in our `CLayout`. For example, the 64x128 `CLayout` is + +```cpp +// (T128,V64) -> (M64,N128) +using CLayout = Layout, Shape < _2, _2, _16>>, + Stride, Stride<_64, _8, _512>>>; +``` + +where we see 16 copies of the 64x8 tile. + +### A and B Layout Mapping + +GMMA atoms that consume A and B sources directly from shared memory are a bit interesting. The GMMA Descriptor is constructed on an entore tile of A and/or B data in shared memory rather than being partitioned by threads. That is, every thread sees the entire tile of data and the tile is not reordered so that the descriptor can be constructed on it. In `ALayout` form, this can be expressed + +```cpp +// (T128,V64x8) -> (M64,K16) +using ALayout = Layout>, + Stride< _0, Stride< _1,_64>>>; +``` + +That is, all threads are mapped the to `(m,k) = (0,0) = 0` element and the values (and shape of the values) remains unchanged. The GMMA Descriptor Constructor can then inspect the `(M,K)` layout of this data and create an appropriate GMMA Descriptor or produce an error message saying the data is in an invalid layout for GMMA. diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/cute/0x_gemm_tutorial.md b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/cute/0x_gemm_tutorial.md new file mode 100644 index 0000000000000000000000000000000000000000..17cccbf83479e71ef4b880a95087d93b1511db71 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/cute/0x_gemm_tutorial.md @@ -0,0 +1,668 @@ +# CuTe dense matrix-matrix multiply tutorial + +This section uses the CuTe functionality to write +a dense matrix-matrix multiply implementation. + +## A simple dense matrix-matrix multiply example + +In this section, we will go through +[this example](../../../examples/cute/tutorial/sgemm_nt_1.cu). 
+It illustrates a blocked GPU implementation of GEMM +that uses the building blocks of CuTe +to construct global and shared memory layout mappings +and partition threads among them. +This example is closest to the blocked GEMM +that a computer science student might be asked to implement +in a first-year graduate school +or upper-division undergraduate scientific computing course. + +Readers who understand this section may also wish to study +CUTLASS's implementation of the stream-K GEMM algorithm, +which uses many features of CuTe. + +### Filename and high-level interface + +First, let's look at the example's filename `sgemm_nt_1.cu`. +"SGEMM" is the BLAS (Basic Linear Algebra Subroutines) abbreviation +for "Single-precision real, GEneral, Matrix-matrix Multiply." +(If we want to refer to matrix-matrix multiply for all data types, +we say "GEMM.") +The BLAS project started in the 1970s. +You can learn more about its history in Turing Award winner Jack Dongarra's +2004 Oral History interview by SIAM +(the Society for Industrial and Applied Mathematics), +and also in the C++ Standard document [P1417](https://wg21.link/p1417). +The abbreviation SGEMM unpacks as follows. + +* "Single-precision" is Fortran-speak for float. + The BLAS supports four different matrix or vector element types: + + * S for single precision (`float`), + + * D for double precision (`double`), + + * C for complex float (like C++'s `std::complex`, + where each of the real and imaginary components has type `float`), + and + + * Z for complex double (like C++'s `std::complex`). + +* "GEneral" means that the matrix is represented + as a two-dimensional dense array + and not assumed to have any kind of symmetry. + The BLAS supports a variety of matrix representations, + including + + * SY: SYmmetric, + + * HE: HErmitian, + + * TR: TRiangular, + + * GB: General Banded, + + * SB: Symmetric Banded, + + * SP: Symmetric Packed, and + + * TP: Triangular Packed. + +* MM means "Matrix-matrix multiply," as opposed to other operations, + like MV (Matrix-Vector multiply). + +The string "nt" in the filename means that +the first input matrix A is "Not transposed," +while the second input matrix B is "Transposed." +That is, the function computes `C := beta * C + alpha * A * B^T`, +where the superscript T denotes the transpose of the matrix. +(We never change the input matrix in place or +store its entire transpose explicitly. +Instead, we reinterpret its data in place.) + +GEMM's TRANSA and TRANSB arguments lets users specify +the transpose or Hermitian transpose (if complex) +of either or both input matrices A or B. +It turns out that implementations favor this "NT" case, +along with "TN" (A is Transposed, B is Not transposed). +We will explain why below. + +As described, the original BLAS GEMM specifies +the dimensions of its matrices +as A is M x K, B is K x N, and C is M x N. +Out of convenience, CuTe interprets A +as M x K, B as N x K, and C as M x N. Instead of row-major or column-major (or Transposed +and Not-Transposed like above), we like to be more specific with M-major, N-major, or K-major. +Regardless, we'll still use the BLAS "NT" notation for high-level descriptions +of kernels when it's appropriate. + +Now, let's look at the code. +We'll start with the kernel entry point `gemm_device` +at the top of the file. 
+ +```c++ +template +__global__ static +__launch_bounds__(decltype(size(CThreadLayout{}))::value) +void +gemm_device(MShape M, NShape N, KShape K, + TA const* A, AStride dA, ABlockLayout blockA, AThreadLayout tA, + TB const* B, BStride dB, BBlockLayout blockB, BThreadLayout tB, + TC * C, CStride dC, CBlockLayout , CThreadLayout tC, + Alpha alpha, Beta beta); +``` + +There are many template parameters; +we'll explain them all in due time. + +`TA`, `TB`, and `TC` are the element types +of the matrices `A`, `B`, and `C`, respectively. +The two scalar constants `alpha` and `beta` +are part of what GEMM computes: `C = beta * C + alpha * A * B`. +Unlike the (traditional Fortran and C) BLAS, +CuTe lets you mix different matrix element types and/or scalar types. +The compiler will help, but it's somewhat up to you +to use types that are safe and efficient on the GPU. +For example, a custom arbitrary-precision real type +that does dynamic allocation inside may not work on the GPU at all. +Even if it does, it may not perform well. + +This leaves five kinds of things to explain: + +1. Shapes + +2. Strides + +3. Block layouts + +4. Thread layouts + +5. Launch bounds + +### Shapes + +The original Fortran BLAS GEMM lists the matrices' dimensions +in the order M, N, K. CuTe also uses this convention. +The "MShape" is just M, +the NShape is just N, +and the KShape is just K. +In this example, they are dynamic (run-time) values +defined at the top of the `gemm` host function +that invokes the device kernel. + +```c++ +// Define shapes (dynamic) +auto M = int(m); +auto N = int(n); +auto K = int(k); +``` + +Note that the function takes M, N, and K. +It doesn't take the shapes of the three matrices separately, +as (say) three different `Shape` objects. +This is because matrix-matrix multiply constrains the shapes. + +There's nothing mysterious about `int` here; +it's the usual C++ built-in integral type. +`auto M = int(m)` is a way to say +"convert `m` to an `int` if it's not already an `int`, +and assign it to the freshly declared variable `M`." +CuTe also has a capitalized `Int` templated type +for representing values as compile-time constants. +For example, `Int<5>` represents a compile-time `int` value 5. +(CuTe implements these as subclasses +of the C++ Standard Library class `std::integral_constant`.) +The above `gemm_device` function is templated on the types +of M, N, and K; this shows that CuTe can represent dimensions +as either run-time or compile-time values. + +If you're familiar with the mdspan class going into C++23, +you might notice that CuTe represents shapes +a bit differently from mdspan. +mdspan uses `extents` +to represent a shape. +The `Extents` are zero or more compile-time values +(see below) representing the dimensions in the shape. +The `Extents...` are "non-type template parameters" (NTTPs) -- +that is, they are not types, but compile-time values of type `size_t`. +If you use the special reserved `size_t` value `std::dynamic_extent` +as an extent value, +the resulting dimension is a run-time value +and is stored in the `extents` instance. +Any other extent value is a compile-time value +that is encoded in the extents type itself. +In contrast, CuTe represents a shape as `Shape`. +The `Types...` are actual types, not NTTPs. +A built-in integral type like `int` or `uint64_t` +denotes a run-time dimension that is stored in the `Shape` instance, +while a compile-time value like `Int<5>` +encodes a compile-time dimension. 
+For example, the CuTe equivalent of +`extents` +is `Shape, int, Int<5>>`. + +#### Compile-time-ness of values + +C++ values have three levels of "compile-time-ness": + +1. dynamic (run-time) values, + +2. constexpr values, and + +3. static (compile-time) values. + +(Rather than saying "C++ has," +it's more accurate to say "C++17 has." +C++20 introduces `consteval` or "immediate" functions, +which make attempting to evaluate the function at run time +(any call not in an unevaluated context) a compiler error. +We'll ignore those for this tutorial, +since CuTe only requires C++17.) + +The `constexpr` keyword was introduced in C++11. +It means something like +"the compiler can evaluate this expression at compile time." +It does NOT mean "the compiler MUST evaluate this at compile time." +If you use a `constexpr` expression in a `static_assert` +or as a non-type template argument, +then the compiler must evaluate the expression at compile time. +However, for `constexpr` occurring in other places, +the compiler may choose to store the value in registers or memory, +and/or do computations with the value at run time. +In some cases, the compiler must do that. +The following example shows that the compiler +might need to store `constexpr` values in memory sometimes. + +```c++ +// Some function defined in a different compilation unit. +extern int foo(int* x); + +int bar() +{ + constexpr int value = 42; // a compile-time constant + + // Even constexpr variables have a sizeof, + // because we still might need to take their address. + static_assert(sizeof(value) == 4); + + // Compiler can't inspect foo to see how it uses the value, + // so it has to store the value in some memory location + // so that we can pass its address to the function. + return foo(&value); +} +``` + +"Static" is an unfortunately overloaded term in C++. Sometimes it means "the opposite of instance," like a "static function" or "static member" of a class. (Some programming languages, like Java, say "class method" to refer to a "static function of a class.") That's not what we mean here. Instead, we mean "part of a compile-time type." For example, `Int<1>` encodes the value 1 at compile time, as part of the type of a templated class `Int`. `Int<3>` and `Int<4>` have different types. You can get the value of of the type like this: `Int<3>::value`. (The `value` is a `static constexpr` member of the class, where "static" means "opposite of instance.") As soon as you go from `Int<3>` to `Int<3>::value`, you've gone from (3) above (a compile-time value) to (2) above (a `constexpr` value). In some situations, this may mean that the compiler treats it as a run-time value. + +#### Strides + +We define a layout using both shapes and strides. +The shape just tells you the dimensions (modes, etc.) of the array. +The strides tell you the mapping from a multidimensional index +into a one-dimensional offset. +Here, we're describing the shapes and strides +of the "global" matrices A, B, and C. +The example defines the global matrices' strides +near the top of the `gemm` function. + +```c++ +// Define strides (mixed) +auto dA = make_stride(Int<1>{}, ldA); // (dM,dK) +auto dB = make_stride(Int<1>{}, ldB); // (dN,dK) +auto dC = make_stride(Int<1>{}, ldC); // (dM,dN) +``` + +To evaluate this mapping for a given multidimensional index, take the dot product of the indices with the strides. For example, the offset of `A(index_m, index_k)` is `index_m * 1 + index_k * ldA`. Note the implications for the compile-time-ness of the offset. 
Any run-time value among either the shape or the strides makes the offset a run-time value. Of course, if a particular stride is a compile-time constant (especially 1), it's easier for the compiler to optimize the arithmetic and result. + +Note that in the original source code, +this example is missing the comments after each line. +We've added them in here, +as they stir a brief digression about shapes and modes. +The comment after B says (dN, dK), not (dK, dN). +This means that B is treated as an N x K matrix +instead of a K x N matrix. +As mentioned, CuTe follows the convention +that the meaning of matrix modes is +(M,K) for A, (N,K) for B, and (M,N) for C. +In particular, CuTe's convention is that +"the reduction mode is outermost." +The "reduction mode" of `Shape` is K. +That's the mode over which we do a reduction, +that is, sum up products of matrix entries. +The K mode disappears in the output C. +"Outermost" here means "rightmost" +(literally, appearing rightmost in the list M, N, K). +Note that the shapes form a kind of Einstein tensor notation. +GEMM does Shape = Shape * Shape. +In Einstein notation, the repeated index indicates +a sum of that term over all values of K. + +We say in general that the leftmost mode is the "inner(most)" mode, +and the rightmost mode is the "outer(most)" mode. +This is because, +along with CuTe's convention of thinking of arrays as logically column major, +the leftmost mode is most commonly the mode with the most spatial locality. +It's very often the "most contiguous" mode. +For this reason, it's "the mode that we want in the innermost loop" +(in the nesting of loops that implements GEMM). +This is why we call it the "innermost" mode. +Its contiguity means that also call the innermost mode the "vector mode." + +The vector mode also has special meaning: +it contains all of the information needed +to execute the smallest possible computation or communication operations +on hardware, that is, what CuTe calls the "atoms." + +Modes are like units conceptually. +For example, you shouldn't mix M-mode indices with K-mode indices. +However, CuTe does nothing to enforce this. +(For example, CuTe does not require use of "tagged" index types. +Indexing works with the usual integer types.) + +The previous paragraph relates to shapes, not strides. +Returning to the strides, the above code describes these strides as "mixed." +This means that they include both run-time and compile-time values. +For example, the stride between A(m, k) and A(m+1, k) is `Int<1>`, +a compile-time value 1. The stride between A(m, k) and A(m, k+1), +however, is `ldA`, the "leading dimension of A," a run-time value. +The "leading dimension" of a matrix +refers to the stride between consecutive columns of a column-major matrix +(where the stride between consecutive rows is 1), +or the stride between consecutive rows of a row-major matrix +(where the stride between consecutive columns is 1). +This is a naming convention from the BLAS +and libraries that use it, like LAPACK. +For the purpose of this tutorial, it's just a naming convention +for "the stride that isn't the compile-time constant 1." + +#### M-major, N-major, K-major + +Note that we haven't uttered the phrases "column-major" or "row-major" here. This is where the experience of a BLAS user diverges from the experience of a BLAS implementer. BLAS users speak of "column-major" and "row-major" layouts. C++23's `mdspan` class encodes these as `layout_left` resp. `layout_right`. 
However, we don't speak of "column-major" or "row-major" in our GEMM implementations. + +We say that a matrix is "M-major" if it is stride 1 in the M-mode, "N-major" if it is stride 1 in the N-mode, or "K-major" if it is stride 1 in the K-mode. In the above code, A has shape (M, K) and strides (1, ldA). Since A has stride 1 in the M mode, we say that A is "M major." B has shape (N, K) and strides (1, ldB), so B is "N-major." Similarly, C has shape (M, N) and strides (1, ldC), so C is "M major." + +How do we translate this into the BLAS user's experience? +The following table illustrates for B and C. +(Throughout the table, "Impl" stands for "implementation.") + +Note that the implementation reverses the order of B's modes, +and flips B's strides. +Recall that one evaluates a layout +by taking the dot product of the indices and strides. +Thus, reversing the order of both the modes and the strides +does not change this evaluation. + +| Matrix | User's shape | User's layout | User's strides | Impl layout | Impl shape | Impl strides | +| --- | --- | --- | --- | --- | --- | --- | +| C | M x N | Column major | (1, LDC) | M-major | (M, N) | (1, LDC) | +| A | M x K | Column major | (1, LDA) | M-major | (M, K) | (1, LDA) | + +What about the matrix B? We explained above that B is N-major. How would that translate back into the BLAS user's experience? We take a hint here from the filename including "nt." The "nt" part of the name means that A is not transposed, while B is transposed. The BLAS convention (see e.g., [the documentation for DGEMM](https://netlib.org/lapack/explore-html/d1/d54/group__double__blas__level3_gaeda3cbd99c8fb834a60a6412878226e1.html)) is that if you take the transpose, then the dimensions refer to the transpose ("with op( A ) an m by k matrix, op( B ) a k by n matrix and C an m by n matrix"). Thus, this example actually computes `C = beta * C + alpha * A * B^T`, where `B^T` is an K x N matrix with strides (LDB, 1). The user's "original" matrix B is thus N x K, with strides (1, LDB) -- that's a column-major layout. (Reversing the modes and the strides preserves the layout, since evaluating the layout mapping just takes the dot product of indices and strides.) This lets us expand the above table to include B. + +| Matrix | Transposed? | User's shape | User's layout | User's strides | Impl layout | Impl shape | Impl strides | +| --- | --- | --- | --- | --- | --- | --- | --- | +| C | No | M x N | Column major | (1, LDC) | M-major | (M, N) | (1, LDC) | +| A | No | M x K | Column major | (1, LDA) | M-major | (M, K) | (1, LDA) | +| B | Yes | N x K | Column major | (1, LDB) | N-major | (N, K) | (1, LDB) | + +CuTe developers say: "In CuTe, you can't tell transposed +apart from non-transposed, MN-major from K-major, etc. +without inspecting the strides." +It's now a bit more clear what that means. +CuTe doesn't see whether A or B are transposed. +Instead, CuTe sees shapes and strides. +A CuTe developer must reason backwards from the shapes and strides +in order to see what the BLAS user sees. + +Why does CuTe do this? Consider that matrix multiply performs a reduction in the K-mode. From the user's perspective, it's reducing across rows of the first input matrix, but across columns of the second input matrix. If we instead mentally flip the modes of the first input matrix, then the implementation reduces over columns (the K mode) of both input matrices. This leads to two cases in which the implementation can effectively treat both input matrices in the same way. 
(If you call it with A and B reversed, it should even give the same results for these cases.) + +| Case | User asks for A | User asks for B | Abbreviation | +| --- | --- | --- | --- | +| A is M major, B is N major | Not transposed | Transposed | NT | +| A and B are both K major | Transposed | Not transposed | TN | + +This is why an introductory example starts with NT or TN. +For a summary of the four different transpose options for A and B, +and their corresponding implementation layouts, +please see the table below. + +| Transpose abbreviation | User sees A transposed? | User sees B transposed? | A's impl layout | B's impl layout | +| --- | --- | --- | --- | --- | +| NT | No | Yes | M major | N major | +| TN | Yes | No | K major | K major | +| NN | No | No | M major | K major | +| TT | Yes | Yes | K major | N major | + +#### MN-major and K-major + +As we mentioned above, there are two "preferred arrangements," TN and NT. In the TN arrangement, both A and B are K-major. In the NT arrangement, A is M-major and B is N-major. Even though the two stride-1 modes in NT have different names, it's still the leftmost mode for both A and B that has stride 1. Thus, we can think of the NT arrangement as "MN-major," analogous to how the TN arrangement is "K-major." + +The two preferred arrangements tend to work themselves into implementations, particularly when they use hardware instructions for accelerating matrix multiplies of blocks. In some cases, the hardware instruction may require NT (MN-major) or TN (K-major). For NN or TT, such instructions would require an intermediate transpose -- for example, when loading from global memory to shared memory. + +### Block layouts + +Efficient matrix multiply implementations loop over blocks. +For example, a typical GPU implementation strategy +is for each thread block to iterate over some number of blocks. +In the example, this loop occurs near the end of `gemm_device`. + +```c++ +// TUTORIAL: Example of a very simple compute loop +// Data is read from global to shared memory via the tA|tB partitioning +// gemm(.) operates on the shared memory directly via the tC partitioning + +auto k_max = size<2>(tAgA); + +for (int k = 0; k < k_max; ++k) +{ + // Copy A and B blocks from global memory to shared memory. + copy(tAgA(_,_,k), tAsA); + copy(tBgB(_,_,k), tBsB); + + // On some architectures, copy may be asynchronous. + // This may call for extra synchronization instructions + // beyond just __syncthreads(). + + __syncthreads(); + + // Compute gemm on shared memory input and register accumulator. + // The "epilogue" after this loop will copy the accumulator + // from the register file into global memory. + gemm(tCsA, tCsB, tCrC); + + __syncthreads(); +} +``` + +We will explain the notation in this loop below. The important things to remember are that the coordinate `k` loops over the blocks which the calling thread is supposed to compute, the `copy` functions copy A resp. B blocks from global memory (the first argument) to shared memory (the second argument -- same as C++'s `std::copy`, but the opposite of `memcpy`), and the `gemm` function computes C += A * B on the shared memory blocks. + +It turns out that copy takes an optional first argument, the "atom," as in the following. + +```c++ +copy(atom, source, destination); +``` + +The "atom" is metadata that explains how to do the copy operation. + +There are a few topics to push onto the stack. + +The copy function call shows a notation for taking slices of a tensor. 
A CuTe `Tensor` is a multidimensional array view. It consists of a pointer and a `Layout`. You can learn more about `Tensor`s elsewhere in CuTe's documentation, but for now, please note that `tAgA(_,_,k)` means "create a Tensor that views (i, j, k) for all valid i, all valid j, and a specific value of k." The result has rank one less than the original Tensor. CuTe's underscore means the same thing as a single stand-alone colon in Fortran or Matlab. Note also that CuTe uses the same notation for slices as for tensor indexing. The implementation can distinguish the two cases by checking whether any of the arguments is an underscore. In contrast, the C++23 class mdspan uses a separate function, `submdspan` (not in C++23, and proposed for C++26; see [P2630](https://wg21.link/p2630)), for slicing. + +Fully understanding what `copy` and `gemm` do calls for learning about thread layouts as well, so we will wait to explain them completely. For now, note that these functions are implicitly parallel, as they are called collectively by all threads in a thread block. + +The block dimensions are defined near the top of the host function `gemm`. + +```c++ +// Define block sizes (static) +auto bM = Int<128>{}; +auto bN = Int<128>{}; +auto bK = Int< 8>{}; +``` + +We see that these are fully compile-time dimensions. This is often the case, especially when we use hardware instructions that only work for certain problem dimensions. Three lines of code immediately below these construct the block layouts. + +```c++ +// Define the block layouts (static) +auto sA = make_layout(make_shape(bM,bK)); +auto sB = make_layout(make_shape(bN,bK)); +auto sC = make_layout(make_shape(bM,bN)); +``` + +Here, the block layouts just come from the block dimensions. A Layout has two things: a Shape, and Strides. If the caller does not provide Strides, then CuTe computes Strides corresponding to the default "column-major" arrangement of data. This just happens to match the global matrices' layouts, but in general doesn't have to. For example, in the NN or TT cases, we may want to transpose one of the input matrices when copying from global memory to shared memory. + +The example "comments out" some code that prints all the layouts on "thread 0" of each thread block. If you enable the printing code and run the example, it will print all the layouts. For example, sA prints as + +``` +sA +(_128,_8) +(_1,_128) +``` + +and sB prints as + +``` +sB +(_128,_8) +(_1,_128) +``` + +consistently with the definitions above. + +If you have looked at other GEMM examples in CuTe, you might be wondering about hardware matrix-matrix multiply instructions. Those instructions tend to require certain values for shapes and strides, that may be a function of the matrix's element type. CuTe knows about these instructions and their required shapes and strides. We will go into more detail about that elsewhere. + +The `gemm_device` top-level kernel uses these block layouts to allocate shared memory buffers for A and B tiles. + +```c++ +// Shared memory buffers +__shared__ TA smemA[cosize_v]; +__shared__ TB smemB[cosize_v]; +``` + +Note how the shared memory buffers' sizes depend only on the A resp. B layouts (and element sizes). What's a `cosize_v`? The "`_v`" is a C++ naming convention that specifies a function from one or more template argument(s), to a value. In this case, it's a number of elements. A layout is a function from a set of multidimensional coordinates to a set of one-dimensional array offsets. 
It's a function, so we can speak of its domain and codomain. The "cosize" of a layout is the size of its codomain. (See e.g., CuTe's implementation of `Layout`.) If we want to allocate a linear array, for which all the offsets produced by a layout are valid, then we can use the cosize of the layout as the length of the array (in terms of number of elements, not in terms of number of bytes). + +### Thread layouts + +CuTe uses a `Layout` to describe the assignment of threads to work items. +In this example, the host function `gemm` constructs the thread layouts +for A, B, and C. + +```c++ +// Define the thread layouts (static) +auto tA = make_layout(make_shape(Int<32>{}, Int< 8>{})); +auto tB = make_layout(make_shape(Int<32>{}, Int< 8>{})); +auto tC = make_layout(make_shape(Int<16>{}, Int<16>{})); +``` + +That is, the thread layout for the A read is M-major 32x8, for the B read is N-major 32x8, and for the C compute/write is M-major 16x16. These thread layouts will partition the data for their respective stages. + +#### The example uses compile-time thread and block layouts + +Note that the device function `gemm_device` insists that all the thread and block layouts are static -- that is, known at compile time. You can see this from the `CUTE_STATIC_ASSERT` statements near the top of `gemm_device`. `CUTE_STATIC_ASSERT` is a wrapper for `static_assert`, which fails at compile time if its condition is `false`. + +```c++ +// Preconditions +CUTE_STATIC_ASSERT(is_static::value); +CUTE_STATIC_ASSERT(is_static::value); +CUTE_STATIC_ASSERT(is_static::value); + +CUTE_STATIC_ASSERT(is_static::value); +CUTE_STATIC_ASSERT(is_static::value); +CUTE_STATIC_ASSERT(is_static::value); +``` + +Use of static layouts has two advantages. First, it makes it easier to prove correctness of the algorithm. If the code compiles, it's likely correct. (On the other hand, new CuTe users may find themselves doing more debugging at compile time than they have before.) Second, it makes it easier and faster for CuTe to dispatch to the correct optimized implementations (called "atoms" -- see below) for copying blocks and performing matrix multiplies. + +#### The example's block gemm is parallel over elements of C + +In the actual device function, `tC` has layout `CThreadLayout`. You might recall that the kernel function `gemm_device` uses `CThreadLayout` to derive the launch bounds, specifically the maximum number of threads per block. The launch bounds show up in the declaration of `gemm_device`. + +```c++ +template +__global__ static +__launch_bounds__(decltype(size(CThreadLayout{}))::value) +void +gemm_device(MShape M, NShape N, KShape K, + TA const* A, AStride dA, ABlockLayout blockA, AThreadLayout tA, + TB const* B, BStride dB, BBlockLayout blockB, BThreadLayout tB, + TC * C, CStride dC, CBlockLayout , CThreadLayout tC, + Alpha alpha, Beta beta); +``` + +The "size" of `CThreadLayout` is the total number of threads, 16 * 16 = 256. (We take `::value` because the size is actually `Int<256>`, a compile-time constant with a `static constexpr int value = 256` member.) This suggests that the block gemm function (in the loop over blocks) parallelizes over elements of the C block. We can see this as well from the kernel launch (at the end of the `gemm` host function), which uses the size of `CThreadLayout` as the block dimension. 
+ +```c++ +// Define the thread layouts (static) +auto tA = make_layout(make_shape(Int<32>{}, Int< 8>{})); +auto tB = make_layout(make_shape(Int<32>{}, Int< 8>{})); +auto tC = make_layout(make_shape(Int<16>{}, Int<16>{})); + +dim3 dimBlock(size(tC)); +dim3 dimGrid(ceil_div(size(M), size(bM)), + ceil_div(size(N), size(bN))); +gemm_device + <<< dimGrid, dimBlock, 0, stream >>> + (M, N, K, + A, dA, sA, tA, + B, dB, sB, tB, + C, dC, sC, tC, + alpha, beta); +``` + +Note that dimBlock is single-dimensional (despite being a dim3), as the size of a layout is a single value. We can see this also because the example only ever uses `threadIdx.x`, not `threadIdx.y`. Yet, C's thread layout has shape (16, 16). What's with that? Recall that a thread layout maps from a "logical" coordinate space (possibly multidimensional tuples of indices) to (one-dimensional) integer indices. In this case, `CThreadLayout` maps from pairs of indices in the Cartesian product space {0, 1, 2, ..., 15} x {0, 1, 2, ..., 15}, to one-dimensional indices 0, 1, 2, ..., 255. The latter, the output of `CThreadLayout`, is the actual thread index `threadIdx.x` in this case. `CThreadLayout` has only a shape (16, 16) and no nondefault strides, so it uses CuTe's default column-major arrangement (with strides (1, 16) in this case). + +#### What does `local_tile` do? + +The following code near the top of `gemm_device` +operates on the "global" (input and output) matrices A, B, and C +(where mA, mB, and mC are their Tensor representations). + +```c++ +// Get the appropriate blocks for this thread block -- +// potential for thread block locality +auto blk_shape = make_shape(size<0>(sA), size<0>(sB), size<1>(sB)); // (BLK_M,BLK_N,BLK_K) +auto blk_coord = make_coord(blockIdx.x, blockIdx.y, _); // (m,n,k) + +Tensor gA = local_tile(mA, blk_shape, blk_coord, Step<_1, X,_1>{}); // (BLK_M,BLK_K,k) +Tensor gB = local_tile(mB, blk_shape, blk_coord, Step< X,_1,_1>{}); // (BLK_N,BLK_K,k) +Tensor gC = local_tile(mC, blk_shape, blk_coord, Step<_1,_1, X>{}); // (BLK_M,BLK_N) +``` + +There are two new features here: + +* `make_coord`, which returns a `Coord`, a multidimensional index which can be used as the input of a `Layout`; and + +* `local_tile`, which we will explain below. + +The `Coord`(inate) `blk_coord` refers to the set of blocks (indexed by k -- the underscore here indicating a free parameter) our thread block will access. (The index k here doesn't mean the K mode; it's the same index as in the loop over blocks that does the computation.) + +If we print out the `gA`, `gB`, and `gC` layouts, we get the following. + +``` +gA +(_128,_8,512) +(_1,5120,40960) + +gB +(_128,_8,512) +(_1,5120,40960) + +gC +(_128,_128) +(_1,5120) +``` + +All of these layouts come from the original input or output matrices A, B, and C. Thus, they preserve the original strides, which are all the same in this example (when using default problem dimensions), 5120. This is most easily seen in the gC layout. For the other layouts, there is a clue in 5120 * 8 = 40960. That is, every time we increase k by one, we "skip over 8 columns" of the global matrix, over to the next block of data. This illustrates an important feature of CuTe, that it can view the same data with different modes and/or strides, as a way to identify parallelism or locality. + +## Next steps + +The above "simple GEMM" example's performance on many problems +is asymptotically optimal +with respect to the GPU's floating-point throughput. 
+Getting nearly peak performance +relative to the GPU's floating-point throughput, +for a wider variety of problem dimensions, +calls for more advanced techniques. +Please refer to other examples in this repository +to learn more about those techniques. +For example, the +[predication section of the tutorial](./0y_predication.md) +explains what to do if a matrix tiling +doesn't perfectly divide the matrix. + +### Implement GEMM as generalized tensor contraction (GETT) + +"GETT" here stands for "general(ized) tensor times tensor," +a tensor contraction. + +CuTe permits matrices to have nested `Layout`s. +For example, a matrix A can have a nested `Layout` for its M and N modes. +This means that we can use a "matrix" (`Tensor` with two modes) +to represent any `Tensor`. +This amounts to a "native hierarchical representation." + +As a result, we can implement GETT by using +our existing GEMM implementation layers, +with a little bit of fancy custom predication for the K mode. +This is because the stride type of A +and the problem shape itself +are CuTe Shapes and Strides. +This lets us represent the hierarchical modes +of a tensor contraction problem +(which still fundamentally only have 4 modes -- +batch mode, +two outer modes (one for A and one for B), +and one reduction mode -- +each of which can now have as many nested modes as you want +for the contraction's inputs). +We thus implement GETT as contraction just in one mode -- the K mode. +However, K itself can be hierarchical and can have noncontiguous strides. +We can reorder the modes such that all contraction modes +become a single, possibly hierarchical K mode in the kernel. +This is how we would encode a contraction in multiple modes at once. diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/cute/0y_predication.md b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/cute/0y_predication.md new file mode 100644 index 0000000000000000000000000000000000000000..f764508bf1272f4a5842f3251987d9604edbbcbe --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/cute/0y_predication.md @@ -0,0 +1,217 @@ +# Predication: What to do when tiling isn't perfect + +The [GEMM tutorial](./0x_gemm_tutorial.md) shows how +we compute a matrix-matrix multiply +by iterating over tiles of the input matrices and output matrix. +The examples all assume that the tiles fit evenly into the matrices, +with no remainder. +What do we do if this is not the case? +For example, we might want to tile a 41 x 55 matrix into 4 x 8 tiles, +but 41 / 4 is 10 remainder 1, and 55 / 8 is 6 remainder 7. +What do we do with those "leftover" parts of the matrix? + +Another way to say this, is that `logical_divide` +(CuTe's way of tiling layouts) "rounds up." +For example, if `N` is the layout (1000, 1) and `B` is the layout (128, 1), +then `logical_divide(N, B)` is the layout ((128, 8), (1, 128)). +This effectively rounds up the original shape N = 1000 +into an 128 x 8 matrix (as if N = 1024). +What about those last 24 elements, +that aren't part of the original data? + +The idiomatic CuTe way to solve this problem is through "predication." +Rather than trying to reason about the "remainder tiles," +CuTe instead rounds up, but only tries to access data in each tile +that are part of the matrix. +This corresponds well with how our GPUs optimize: +branches without warp divergence are relatively fast. 
+It also matches the usual CUDA idiom +when dividing N work items in 1-D fashion over B thread blocks: +first test if "my thread" is out of bounds before doing work. + +There are a few ways to figure out +which elements need to be predicated. +In-kernel GEMMs like to do this in the following way. + +```c++ +// Create the predicate tensor +Layout idA = make_layout(shape(A)); // e.g. 1000:1 +Layout idAB = logical_divide(idA, B); // e.g. (128,8):(1,128) + +Tensor pred = make_tensor(shape(idAB)); +for (int i = 0; i < size(pred); ++i) { + pred(i) = idAB(i) < size(A); +} + +// ... intervening code ... + +// Use the predicate tensor. c is some coordinate. +// This code would likely live inside some algorithm. +if (pred(c)) { copy(idAB(c), smem(c)); } +``` + +The general procedure is that we + +1. create an "identity" layout (`Layout idA = make_layout(shape(A))`, + in the above example) with the same shape as our original data; + +2. repeat the same tiling/partitioning/slicing (possibly rounding up) + on that identity layout (`Layout idAB = logical_divide(idA, B)`); + +3. create a "predicate tensor" by comparing the coordinates + of that reference layout with the bounds of the original layout; + and then + +4. use the predicate tensor to mask off accesses to out-of-bounds elements. + +For example, suppose that we've partitioned A and B tiles +across threads as follows. + +```c++ +Tensor tAgA = local_partition(gA, tA, thread_idx); // (THR_M,THR_K,k) +Tensor tAsA = local_partition(sA, tA, thread_idx); // (THR_M,THR_K,PIPE) + +Tensor tBgB = local_partition(gB, tB, thread_idx); // (THR_N,THR_K,k) +Tensor tBsB = local_partition(sB, tB, thread_idx); // (THR_N,THR_K,PIPE) +``` + +`tAgA` and `tBgB` partition the global A resp. B matrices over threads, +and `tAsA` and `tBsB` partition the shared memory tiles of A resp. B over threads. + +The following code creates predicate tensors +corresponding to `tAgA` and `tBgB`. +They will be computed once in the prologue. +and will be used to mask off instructions in the inner loop. + +```c++ +Tensor tApA = make_tensor(make_shape (size<0>(tAgA), size<1>(tAgA)), + make_stride( Int<1>{}, Int<0>{})); +Tensor tBpB = make_tensor(make_shape (size<0>(tBgB), size<1>(tBgB)), + make_stride( Int<1>{}, Int<0>{})); +``` + +We're only thread-parallelizing over the leftmost (row) dimension, +so we only need to predicate over the leftmost dimension. +Thus, we can make the rightmost (column) stride zero, +since we will never actually address the rightmost dimension. + +The following code creates "two-dimensional identity tensors" +that map coordinates (m,k) -> (m,k) +for the tile of data within the thread block. + +```c++ +Tensor cA = make_identity_tensor(make_shape(size<0>(sA), size<1>(sA))); // (BLK_M,BLK_K) -> (blk_m,blk_k) +Tensor cB = make_identity_tensor(make_shape(size<0>(sB), size<1>(sB))); // (BLK_N,BLK_K) -> (blk_n,blk_k) +``` + +The following lines then tile and partition +the two reference tensors +in exactly the same way the data were tiled and partitioned +into `tAsA` and `tBsB`. + +```c++ +Tensor tAcA = local_partition(cA, tA, thread_idx); +Tensor tBcB = local_partition(cB, tB, thread_idx); +``` + +Tiling and partitioning affect the offset and domain, +but not the codomain of the tensors, +so we're left with tensors that map `(thr_m,thr_k) -> (m,k)` +where `(thr_m,thr_k)` is this particular thread's subtensor of the tile +and `(m,k)` is the original codomain: a coordinate into the original tile. 
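As a small standalone illustration (our own snippet, not from the tutorial source; the 4x2 shape is arbitrary), the following shows the property this approach relies on: an identity tensor simply returns its own coordinate, so after any tiling or partitioning, each remaining element still reports its original `(m,k)` coordinate and can be compared against a bound.

```c++
#include <cute/tensor.hpp>
using namespace cute;

void identity_tensor_demo() {
  // (m,k) -> (m,k): the "data" of an identity tensor is its own coordinate.
  auto cA = make_identity_tensor(make_shape(Int<4>{}, Int<2>{}));

  auto coord     = cA(3, 1);             // the coordinate (3,1) itself
  bool in_bounds = get<0>(coord) < 3;    // predicate against an m-extent of 3
  // in_bounds is false here: row 3 lies outside a 3-row bound.
  (void) in_bounds;
}
```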
+ +The unrolled loops in the code below then compare +the m- and n-coordinates of those tensors with our known maximums +to mask off elements we are not allowed to access. + +```c++ +Tensor cA = make_identity_tensor(make_shape(size<0>(sA), size<1>(sA))); // (BLK_M,BLK_K) -> (blk_m,blk_k) +Tensor tAcA = local_partition(cA, tA, thread_idx); + +Tensor cB = make_identity_tensor(make_shape(size<0>(sB), size<1>(sB))); // (BLK_N,BLK_K) -> (blk_n,blk_k) +Tensor tBcB = local_partition(cB, tB, thread_idx); + +// Populate +CUTE_UNROLL +for (int m = 0; m < size<0>(tApA); ++m) { + tApA(m,0) = get<0>(tAcA(m,0)) < m_max_coord; +} +CUTE_UNROLL +for (int n = 0; n < size<0>(tBpB); ++n) { + tBpB(n,0) = get<0>(tBcB(n,0)) < n_max_coord; +} +``` + +Those last `for` loops fill in the two predicate tensors. +In this case, we only need to predicate over the leftmost dimension, +so we only address `(m,0)` resp. `(n,0)`. + +We can then use the predicate tensors in `copy_if` +to copy only the elements for which the corresponding +predicate tensor elements are nonzero. + +```c++ +// Prefetch k_tile=0, gate these on k_residue as well +CUTE_UNROLL +for (int k = 0; k < size<1>(tAsA); ++k) { + if (get<1>(tAcA(0,k)) >= -k_residue) { // some other condition on the column index + copy_if(tApA, tAgA(_,k,0), tAsA(_,k,0)); + } +} + +CUTE_UNROLL +for (int k = 0; k < size<1>(tBsB); ++k) { + if (get<1>(tBcB(0,k)) >= -k_residue) { // some other condition on the column index + copy_if(tBpB, tBgB(_,k,0), tBsB(_,k,0)); + } +} +``` + +Here are some advantages of this "reference tensor" approach. + +1. It doesn't depend on the layout/strides of the tensor + being predicated, just the logical bounds being imposed. + +2. The partitioning stage can be anything. + +3. It naturally extends to any-dimensional predication. + +4. It's a natural generalization of a typical CUDA 1-D + parallel vector access pattern, + which computes an access index `k` + (e.g., as `blockDim.x * blockIdx.x + threadIdx.x`) + and then predicates access to the vector's `k`-th element + on whether `k` is in bounds. + +As an example of (3), the epilogue predication does exactly the same thing, + +```c++ +// Repeat with a tensor of coordinates for predication +Tensor cC = make_identity_tensor(make_shape(size<0>(gC), size<1>(gC))); +Tensor tCcC = thr_mma.partition_C(cC); + +const bool isBetaZero = (beta == 0); + +CUTE_UNROLL +for (int i = 0; i < size(tCrC); ++i) { + if (elem_less(tCcC(i), make_coord(m_max_coord,n_max_coord))) { + tCgC(i) = isBetaZero ? alpha * tCrC(i) : alpha * tCrC(i) + beta * tCgC(i); + } +} +``` + +but with the mma responsible for the tiling/partitioning `tCcC` +so that the reference subtensor matches the accumulator's subtensor. +Then, the reference subtensor is predicated against the `if` bounds +(in both m- and n-coordinates) inside the `for` loop. + +Another way to explain this is that we don't modify the tiles +to give you the "right" extents so that you never overrun. +Instead, we let you query the original coordinate +to see if that coordinate overruns. +This avoids all branching and variable/dynamic loop bounds +(thus maintaining load balance and synchronicity, +both very important in-kernel) in favor of predication. +It's also general enough to extend to all ranks, +all layouts of threads and data, +and all tiling/partitioning patterns. 
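To connect this back to item (4) in the list of advantages above, the following standalone sketch (our own illustration, not code from this repository) is the 1-D CUDA special case of the same recipe: the global index plays the role of the reference coordinate, and the bounds check plays the role of the predicate tensor.

```c++
// The 1-D special case of the "reference coordinate + predicate" recipe.
__global__ void copy_1d(float const* in, float* out, int n) {
  int k = blockDim.x * blockIdx.x + threadIdx.x;  // reference "coordinate"
  bool pred = (k < n);                            // predicate from the bound
  if (pred) {
    out[k] = in[k];                               // masked access
  }
}
```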
diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/cute/0z_tma_tensors.md b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/cute/0z_tma_tensors.md new file mode 100644 index 0000000000000000000000000000000000000000..3e0d0b1c7d6b0f0c231b5df3843a74c6cc6561cd --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/cute/0z_tma_tensors.md @@ -0,0 +1,83 @@ +# TMA tensors + +TMA tensors have three differences from +"ordinary" global memory tensors. + +1. The tensor's iterator stores a base coordinate, + not a pointer. + +2. The tensor's actual global memory pointer + does not live in the tensor. + Instead, it lives in a TMA descriptor, + which is stored in the TMA `Copy_Traits` specialization. + +3. The tensor's strides aren't just integers. + Instead, they are linear combinations of "basis functions." + +The following sections will elaborate these differences. + +## Iterator stores a base coordinate, not a pointer + +"Ordinary" tensors of global memory have an iterator type +(the "Engine" template parameter) that wraps a pointer. +For example, `gmem_ptr` wraps a `T*`. +A TMA tensor's iterator type is `ArithmeticTupleIterator`. +`ArithmeticTupleIterator` stores a coordinate +(a tuple of integers) instead of a pointer. +The coordinate is represented as an `ArithmeticTuple`, +which is just a (public subclass of) `cute::tuple` +that has an overloaded `operator+`. +The sum of two tuples is the tuple of the sum of the elements. + +When we perform the TMA load or store, +the iterator's coordinate goes into the PTX instruction. +(For TMA specializations of `Copy_Traits`, +this happens in the `private` member function `copy_unpack_`.) +The coordinate represents the tensor's "base coordinate." +For tiled TMA, the base coordinate of the whole tensor +might start out as (0, 0, ..., 0). However, slicing the tensor +might result in a different base coordinate. +For im2col TMA load, the base coordinate is the lower corner. + +## Pointer lives in TMA descriptor, not tensor + +The TMA descriptor has the actual pointer to global memory in it. +Storing the TMA descriptor in the tensor would make tensors +expensive to copy and slice, as the TMA descriptor is 128 bytes. +Instead, we store the TMA descriptor +in the `Copy_Traits` specialization. + +## Tensor's strides aren't just integers + +For "ordinary" tensors, the layout takes a coordinate +`(i, j)` as input, and returns a single integer offset `k`. +The resulting pointer-to-element +is the base pointer, plus the offset k. +However, TMA loads and stores don't take a pointer. +They take a TMA descriptor, and a coordinate `(i, j)`. +Building the strides out of "basis functions" +is the trick to make the layout return a coordinate -- +a tuple of integers -- instead of just a single integer offset. +A "basis function" for strides +is a lot like a basis function for Euclidean space, +except that strides' basis functions can be hierarchical. + +Layouts work by taking the inner product +of their input coordinate with the strides. +For "ordinary" integer strides, e.g., `(1, 100)`, +the inner product of the input coordinate `(i, j)` +and the strides is `i + 100j`. +That gives the formula for the offset. +For strides built of basis functions, for example, +if the strides are `(_1@0, _1@1)`, +then the inner product of the input coordinate `(i, j)` +with the strides is `i@0 + j@1`. 
+The `i` here is a coefficient of the basis function `@0`, +and `j` is a coefficient of the basis function `@1`. +The result is a vector sum. We _interpret_ this result as +"the zeroth coefficient is i, and the first coefficient is j." +That translates into the (TMA) coordinate `(i, j)`. +If we wanted to reverse the coordinates, +then we could use `(_1@1, _1@0)` as the strides. +Evaluating the layout would give `i@1 + j@0`, +that is, `(j, i)`. diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/cutlass_3x_backwards_compatibility.md b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/cutlass_3x_backwards_compatibility.md new file mode 100644 index 0000000000000000000000000000000000000000..354e70dd4891874c33deda3697f1a42986a71f24 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/cutlass_3x_backwards_compatibility.md @@ -0,0 +1,473 @@ +[README](/README.md#documentation) > **CUTLASS 3.0 GEMM Backwards Compatibility** + +# CUTLASS 3.0 GEMM Backwards Compatibility + +Although CUTLASS 3.0 restructures the GEMM hierarchy and introduces new types for the +threadblock layer and below, we intend the entire source code to be usable in user applications. +We expect users to be able to `#include` any source file from CUTLASS 3.0, whether +they implement the 2.x or the 3.x API, without breaking user builds. This means that a single +translation unit should be able to contain any valid kernel regardless of its API version. The +sections below discuss how `device` and `kernel` layer type names are made compatible across the +two API versions, and what the users can expect out of the `threadblock` layer API going forward. + +## Compatible Device API + +The entry point for CUTLASS's Device GEMM API +is the class +`cutlass::gemm::device::GemmUniversalAdapter`. +This class lives in the header file +[include/cutlass/gemm/device/gemm_universal_adapter.h](/include/cutlass/gemm/device/gemm_universal_adapter.h). + +`GemmUniversalAdapter` is a "universal adapter" +and serves as a common device interface +for both CUTLASS 3.x and CUTLASS 2.x kernels. +Its template parameter `GemmKernel`, +the GEMM kernel type, can be any of the following: + +* `cutlass::gemm::kernel::GemmUniversal`, + implementing CUTLASS 3.x API kernels; +* `cutlass::gemm::kernel::GemmUniversal`, + implementing CUTLASS 2.x API kernels; +* Any valid CUTLASS 2.x `kernel` layer GEMM that + was previously composable with `device::GemmUniversalAdapter` + +Users implementing new kernels in either API should prefer +using `kernel::GemmUniversal` as the kernel type +and compose it with `device::GemmUniversalAdapter`. +Users with existing `kernel::Gemm` kernels +can continue to use them as template arguments +of `device::GemmUniversalAdapter`. They can adopt +`GemmUniversal` as a gradual migration path, +since `GemmUniversal` accepts either 3.0 or 2.x collectives. +Please see the [next section for `kernel::GemmUniversal`](#compatible-kernel-api) for details. + +`GemmUniversalAdapter` presents a single +host-side interface to both 3.0 and 2.x kernels. +CUTLASS accomplishes this by +specializing `GemmUniversalAdapter`'s implementation +on either 2.x API implementing kernel layer GEMMs, or 3.x API +implementing kernel layer GEMMs (as detected by `gemm::detail::IsCutlass3GemmKernel` +discussed below). As a result, `GemmUniversalAdapter`'s behavior +might differ between the two specializations. 
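+As a rough sketch of that composition (the collective types below are placeholders for mainloop and epilogue types the user is assumed to have assembled already, for example with the collective builders):
+
+```c++
+// Hedged sketch: build a 3.x kernel from user-provided collectives, then wrap it
+// in the universal adapter to get a single host-side handle. `CollectiveMainloop`
+// and `CollectiveEpilogue` are assumed to be defined elsewhere.
+using ProblemShape = cute::Shape<int, int, int, int>;   // (M, N, K, L)
+
+using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
+    ProblemShape,
+    CollectiveMainloop,
+    CollectiveEpilogue>;
+
+using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
+
+// Gemm gemm_op;
+// gemm_op(args, workspace, stream);  // same host-side call pattern for 2.x and 3.x kernels
+```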
+ +### Device API design differences + +In CUTLASS 2.x, the Device API was more closely tied +to the Kernel API. In CUTLASS 3.0, the Device API +accepts any kernel type that meets the Kernel API +interface requirements. CUTLASS 3.0's Device API code is +parameterized by the kernel type, but this code +is *generic*; the same code works for any kernel type. + +The device layer compatibility interface, `device::GemmUniversalAdapter`, +also provides reflective mappings from 3.0-specific types +back to the closest possible 2.x equivalent types. This is [discussed further in the section below](#conversions-between-2x-tags-and-30-types). + +CUTLASS 3.0's `device::GemmUniversalAdapter` also exposes some new APIs that the 2.x `device::GemmUniversalAdapter` implementation does not. Most notably, this includes the ability to bypass the `GemmKernel::Arguments` to `GemmKernel::Params` lowering. + +```c++ +// Primary run() entry point API that is static allowing users to create and manage their own params. +static Status +run(Params& params, cudaStream_t stream = nullptr); +``` + +This new API is useful for the following scenarios. + +* Running again does not require reinvoking `GemmKernel::to_underlying_arguments()` +* Manual control over construction of `GemmKernel::Params` for custom kernels with custom stride types +* Fully static problem shapes and strides for bespoke kernels where no argument mapping needs to take place + +## Compatible Kernel API + +CUTLASS 3.x API shares the kernel layer API with CUTLASS 2.x +through the single entry point type `cutlass::gemm::kernel::GemmUniversal`. +All kernel layer GEMMs are viewed as a composition of a collective mainloop +and a collective epilogue. + +**`kernel::GemmUniversal` implements both 2.x and 3.x APIs** + +The entry point for CUTLASS's kernel API is the class +`cutlass::gemm::kernel::GemmUniversal`. +This class' declaration lives in the header file +[include/cutlass/gemm/kernel/gemm_universal.hpp](/include/cutlass/gemm/kernel/gemm_universal.hpp). + +```c++ +/* + * Stateless universal device GEMM kernel type that treats GEMM as + * a composition of a collective mainloop and a collective epilogue. + * SFINAE shims both 2.x and 3.0 API kernels based on ProblemShapeOrThreadblockMma_. +**/ +template < + class ProblemShapeOrThreadblockMma_, + class CollectiveMainloopOrEpilogue_, + class CollectiveEpilogueOrThreadblockSwizzle_, + class TileScheduler_ = void, + class Enable = void +> +class GemmUniversal; +``` + +We call this class "universal" because it can be built +using either the CUTLASS 3.0 or the 2.x mainloops and epilogues. +If `GemmUniversal`'s first template argument +(`ProblemShapeOrThreadblockMma_`) is a `cute::tuple`, +then `GemmUniversal` assumes that +the remaining three template arguments +(the mainloop, epilogue, and grid swizzle) +implement the 3.0 APIs. +Otherwise, `GemmUniversal` assumes that +the remaining three template arguments +implement the 2.x APIs. +All the template arguments must be either +CUTLASS 3.0 or CUTLASS 2.x types. For example, +`GemmUniversal` does not permit using +a 2.x mainloop with a 3.0 collective epilogue. + +CUTLASS 3.x implements various embodiments of `kernel::GemmUniversal`. +Each kernel layer schedule is specialized +for a GEMM scheduling algorithm and GPU architecture. +Specializations of `kernel::GemmUniversal` for 3.0 APIs live in +any of various `gemm_*.hpp` files in the directory +[include/cutlass/gemm/kernel/](../../include/cutlass/gemm/kernel/).
+The specialization to which to dispatch is decided through the dispatch policy's `Schedule` type. + +Specializations for 2.x APIs live in the header file +[include/cutlass/gemm/kernel/gemm_universal.h](../../include/cutlass/gemm/kernel/gemm_universal.h). + +### Kernel API design differences + +The CUTLASS 2.x Kernel API was more closely tied +to the Device API, as we mentioned above. +In particular, the 2.x Device API specified the grid shape +used to launch the Kernel API. +In CUTLASS 3.0, the Kernel API controls its own grid shape, +while the device adapter simply queries the kernel with which it needs to be launched. + +This change is required to support various kernel schedules +that may need their own schedule-specific grid planning logic. +For example, persistent kernel schedules generally only launch with +as many threadblocks as the number of multiprocessors on the GPU. + +All CUTLASS 3 `kernel::GemmUniversal` specializations expose the following (static) API: + +```c++ +// Returns true if the kernel can execute the provided GEMM arguments. +static bool +can_implement(Arguments const& args); + +// Returns a dim3 representing the threadblock shape. +static dim3 +get_block_shape(); + +// Returns a dim3 representing the grid shape in terms of threadblocks. +static dim3 +get_grid_shape(Params const& params); +``` + +The device adapter simply queries the kernel for these three before launching it on the device. +CUTLASS 3.0 provides a meta-function to detect whether a `cutlass::gemm::kernel::*` implements +the 3.x API or 2.x API: + +```c++ +// include/cutlass/gemm/gemm.h + +namespace cutlass::gemm::detail { + +// The following metafunction is used to detect whether a +// `kernel::Gemm` or `kernel::GemmUniversal` implements the CUTLASS 3.x API, +// by checking whether the problem shape type is aliased within. +template <class GemmKernel, class = void> +struct IsCutlass3GemmKernel; + +} // namespace cutlass::gemm::detail +``` + +Users can dispatch their generic code against 2.x and 3.x specializations with +this as a type trait for the kernel API version. + +## Threadblock API and Inner Loops + +Much of the CUTLASS 3 GEMM hierarchy for mainloops and inner loops diverges +from that of CUTLASS 2.x. With that also comes the introduction of the +`cutlass::gemm::collective` layer as a direct replacement and a superset +of the 2.x `cutlass::gemm::threadblock` layer. Going forward, +CUTLASS 3.x will discontinue new developments in the following namespaces. + +* `cutlass::*::threadblock::*` +* `cutlass::*::warp::*` +* `cutlass::gemm::thread::*` +* `cutlass::arch::*` (except `barrier.h`) + +`cutlass::gemm::collective`s are a superset of the threadblock layer where +all new mainloops will be developed. Users should look to the `CollectiveMma` type +if they wish to author custom mainloop code in the 3.x API. + +Similarly, for the GEMM inner loops, `cute::MMA_Atom`s replace the +`gemm::warp` and `gemm::thread` layer code. Going forward, all new PTX instructions +and associated metadata development will occur directly inside [`cute/arch/*.hpp`](/include/cute/arch/) and [`cute/atom/*.hpp`](/include/cute/atom/). + +The desired inner loop MMA iteration order and tiling can be achieved through careful +selection of the atom layout, value layout, and permutations of the `cute::TiledMma`. + +For epilogues, the `cutlass::epilogue::collective` layer replaces `cutlass::threadblock::collective`.
However, the thread-level epilogue elementwise operations +in `cutlass::epilogue::thread` will continue to be used in 3.x kernels as well, albeit, with +a more idiomatic epilogue vectorization strategy. +[Example 50](/examples/50_hopper_gemm_with_epilogue_swizzle/50_hopper_gemm_with_epilogue_swizzle.cu) +shows how to use 2.x epilogue thread operators with 3.0 API kernels. + +## Porting from 2.x to 3.0 API + +### CUTLASS 2.x layout tags and CUTLASS 3.0 major modes + +CUTLASS 2.x and CUTLASS 3.0 use both +different wording and different types +to describe the permitted layouts +of GEMM's input matrices A and B. + +CUTLASS 3.0 does not use the terms "column major" +or "row major" to describe matrix layouts. +Starting with CUTLASS 3.0, adoption of CuTe allows us to decouple + +* the coordinate mode order (logical shape) of layouts from + +* the index space stride order of the backing storage. + +In line with our switch to a conceptual GEMM hierarchy, we view the major modes not from a BLAS-3 perspective. +Rather, we divide the modes into two categories. + +* "Inner modes" or "K-modes" are contracted over during the GEMM. + Therefore, they are not present in the output tensor. + +* "Outer modes" or "MN-modes" are preserved in the output. + +Now, instead of `RowMajor` or `ColumnMajor`, whose major stride depends on whether we are referring to the +A or the B matrix, we uniformly employ the "K major" or "MN major" terminology and enforce the convention of all tensors having the shape `[M/N, K, L]` regardless of which mode is major. That is, + +* the input matrix A has shape M x K, +* the input matrix B has shape N x K, and +* the input/output matrices C/D have shape M x N. + +Note that this convention for B +differs from the BLAS's GEMM interface, +which specifies that B has shape K x N. + +CUTLASS 3.0 uses these names of the modes +to specify which mode of a matrix has stride 1. +For the matrix A, + +* "M major" means that the matrix is stride 1 + in the M mode, and +* "K major" means that the matrix is stride 1 + in the K mode. + +For the matrix B, + +* "N major" means that the matrix is stride 1 + in the N mode (which for B is mode 0, + because the convention is that B is N x K); and +* "K major" means that the matrix is stride 1 + in the K mode (which for B is mode 1). + +CUTLASS 2.x defines "layout tag" classes +`cutlass::layout::ColumnMajor` and `cutlass::layout::RowMajor`, +that live in the header file +[`cutlass/layout/matrix.h`](/include/cutlass/layout/matrix.h). +The interpretation of these layouts in GEMM +depends on whether they are applied +to the input matrix A or B. For the matrix A, "column major" means +that mode corresponding to M extent has stride 1, +and "row major" means that mode corresponding to K extent has stride 1. +This is the usual computer science definition +of column major and row major for a rank-2 array. +For the matrix B, the opposite holds: +"column major" means that mode corresponding to N extent has stride 1, +and "row major" means that mode corresponding to K extent has stride 1. + +Using the convention of `[outer, inner, batch]` mode order for tensor logical shapes +avoids potential confusion with the meaning of column major and row major +changing depending on whether they are applied to A or B. + +The table below summarizes our mode order convention and +mapping of 2.x layout tags to corresponding M-major, N-major, or K-major strides. 
+ +| Matrix | CUTLASS 2.x layout | 2.x Shape | Logical major mode| 3.x Shape/Stride | Major ordinal | +| --- | --- | --- | --- | --- | --- | +| A | `ColumnMajor` | M x K | M major | M x K x L | 0 (outer) | +| A | `RowMajor` | M x K | K major | M x K x L | 1 (inner) | +| B | `RowMajor` | K x N | N major | N x K x L | 0 (outer) | +| B | `ColumnMajor` | K x N | K major | N x K x L | 1 (inner) | +| C | `ColumnMajor` | M x N | M major | M x N x L | 0 (outer) | +| C | `RowMajor` | M x N | N major | M x N x L | 1 (inner) | + +Notice that in CUTLASS 3.0, interpretation of layouts no longer changes based on +whether we are talking about the A or B matrix. M and N major inputs always have a +static size-1 stride in their 0th (outer) mode. Similarly, K major inputs +always contain the static size-1 stride in their 1st mode. This uniformity in stride order +allows us to represent tensor layouts much more cleanly and treat both A and B equally in our interfaces. +See for example the following snippet from our [`kernel/sm70_gemm.hpp`](/include/cutlass/gemm/kernel/sm70_gemm.hpp) +for Ampere kernel schedules. + +```c++ +// Represent the full tensors +Tensor mA_mkl = make_tensor(make_gmem_ptr(params.mainloop.ptr_A), make_shape(M,K,L), params.mainloop.dA); // (m,k,l) +Tensor mB_nkl = make_tensor(make_gmem_ptr(params.mainloop.ptr_B), make_shape(N,K,L), params.mainloop.dB); // (n,k,l) + +// Get batch slice +Tensor mA_mk = mA_mkl(_,_,get<3>(blk_coord_mnkl)); // (m,k) +Tensor mB_nk = mB_nkl(_,_,get<3>(blk_coord_mnkl)); // (n,k) + +// Slice to get the tiles for which this thread block is responsible +Tensor gA = local_tile(mA_mk, blk_shape, take<0,3>(blk_coord_mnkl), Step<_1, X,_1>{}); // (BLK_M,BLK_K,k) +Tensor gB = local_tile(mB_nk, blk_shape, take<0,3>(blk_coord_mnkl), Step< X,_1,_1>{}); // (BLK_N,BLK_K,k) +``` + +As seem in this snippet, all input tensors have the logical shape `[outer, inner, batch]`, +and the strides could represent either outer or inner +(or any other complex hierarchical stride) major storage. +CuTe layouts always maintain the logical consistency of the coordinate spaces regardless of the strides. + +By convention, in CUTLASS 3.0, we treat the M and N mode as the 0th mode, +and K mode as the 1st mode of the stride. + +### Conversions between 2.x tags and 3.0 types + +Starting with CUTLASS 3.0, all layouts are described using +`cute::Shape` and `cute::Stride` which compose into a `cute::Layout`. +In CUTLASS 2.x, various layout tags such as `cutlass::layout::RowMajor` are used to specialize +template implementations. These tag types only encode information about the tensor strides, +as 2.x layouts did not incorporate any concept of tensor shape in the layout tags themselves. +Users may find a need to convert between CUTLASS 2.x layout tags, and 3.0 +CuTe stride types. CUTLASS 3.0 `gemm::collective::CollectiveBuilder` interfaces +also accept these 2.x layout tags as input parameters in their template API as a convenience for users. +At every entry point into CUTLASS 3.0, these tags get converted to their corresponding CuTe Stride type with +metafunctions that best approximate their corresponding `cute::Stride`. + +* `cutlass::gemm::detail::TagToStrideA_t` +* `cutlass::gemm::detail::TagToStrideB_t` +* `cutlass::gemm::detail::TagToStrideC_t` + +By convention, and to match user expectations, the `cute::Stride` types that these +map onto always contain one static mode corresponding to the layout tag, and two 64-bit +dynamic stride modes corresponding to the minor mode and the batch mode. 
Batch +mode is included by default as all CUTLASS 3.0 kernels support packed batch-mode GEMMs +out of the box. + +The [`cutlass/gemm/gemm.h#440`](../../include/cutlass/gemm/gemm.h#440) +header file includes functions +that can be useful for converting +from CUTLASS 3.0 `cute::Stride`s back to CUTLASS 2.x layout tags. + +* `cutlass::gemm::detail::StrideToLayoutTagA_t` +* `cutlass::gemm::detail::StrideToLayoutTagB_t` +* `cutlass::gemm::detail::StrideToLayoutTagC_t` + +These metafunctions take the CuTe Stride as a template parameter and +attempt to find the size-1 stride in the idiomatic M, N, or K modes +to best approximate a corresponding 2.x layout tag type. +Note that this may not work in general for any `cute::Stride` +as the mapping between the stride and tag type is not bijective. + +These mapping utilities are kept in a `detail` namespace +as we do not guarantee stability of their implementation. +Their behavior may change in future releases as we add new features. +However, we do expect these type names to remain stable. For users who want +these 2.x reflective types from an assembled kernel with a more stable API, +the specialization of `cutlass::gemm::device::GemmUniversalAdapter` +for CUTLASS 3.0 kernel provides all aliases for all 2.x type aliases +in addition to the layout tags. You can see how they are used in the header file +[`cutlass/gemm/device/gemm_universal_adapter.h`](/include/cutlass/gemm/device/gemm_universal_adapter.h). +Here is an excerpt. + +```c++ + // Map back to 2.x type as best as possible + using LayoutA = gemm::detail::StrideToLayoutTagA_t; + using LayoutB = gemm::detail::StrideToLayoutTagB_t; + using LayoutC = gemm::detail::StrideToLayoutTagC_t; + using LayoutD = gemm::detail::StrideToLayoutTagC_t; + + // Legacy: Assume MultiplyAdd only since we do not use this tag type in 3.0 + using MathOperator = cutlass::arch::OpMultiplyAdd; + + // If our TiledMMA's instruction thread layout size is larger than 1, + // we know it's a tensorop + using OperatorClass = std::conditional_t< + (cute::size(typename GemmKernel::TiledMma::AtomThrID{}) > 1), + cutlass::arch::OpClassTensorOp, cutlass::arch::OpClassSimt>; + + // Assume TiledMma's ShapeMNK is the same as 2.x's ThreadblockShape + using ThreadblockShape = cutlass::gemm::GemmShape< + cute::size<0>(TileShape{}), + cute::size<1>(TileShape{}), + cute::size<2>(TileShape{})>; + + using ClusterShape = cutlass::gemm::GemmShape< + cute::size<0>(typename GemmKernel::DispatchPolicy::ClusterShape{}), + cute::size<1>(typename GemmKernel::DispatchPolicy::ClusterShape{}), + cute::size<2>(typename GemmKernel::DispatchPolicy::ClusterShape{})>; + + // We get the instruction shape directly from our TiledMma's atom shape + using InstructionShape = cutlass::gemm::GemmShape< + cute::size<0>(typename CollectiveMainloop::TiledMma::AtomShape_MNK{}), + cute::size<1>(typename CollectiveMainloop::TiledMma::AtomShape_MNK{}), + cute::size<2>(typename CollectiveMainloop::TiledMma::AtomShape_MNK{})>; + + static int constexpr kStages = CollectiveMainloop::DispatchPolicy::Stages; + static int const kThreadCount = GemmKernel::MaxThreadsPerBlock; + + // Warp shape is not a primary API type in 3.x, + // but we can best approximate it by inspecting the TiledMma::TiledShape_MNK. + // For this, we make the assumption that we always have 4 warps along M, + // and the rest along N, with none along K. We also always round up + // the warp count to 4 if the tiled mma is smaller than 128 threads. 
+ static constexpr int WarpsInMma = std::max(4, cute::size(typename GemmKernel::TiledMma{}) / 32); + static constexpr int WarpsInMmaM = 4; + static constexpr int WarpsInMmaN = cute::ceil_div(WarpsInMma, WarpsInMmaM); + using WarpCount = cutlass::gemm::GemmShape; + using WarpShape = cutlass::gemm::GemmShape< + cute::size<0>(typename CollectiveMainloop::TiledMma::TiledShape_MNK{}) / WarpsInMmaM, + cute::size<1>(typename CollectiveMainloop::TiledMma::TiledShape_MNK{}) / WarpsInMmaN, + cute::size<2>(typename CollectiveMainloop::TiledMma::TiledShape_MNK{})>; + + // Inspect TiledCopy for A and B to compute the alignment size + static int constexpr kAlignmentA = gemm::detail::get_alignment_count_from_gmem_tiled_copy< + typename CollectiveMainloop::GmemTiledCopyA, ElementA>(); + static int constexpr kAlignmentB = gemm::detail::get_alignment_count_from_gmem_tiled_copy< + typename CollectiveMainloop::GmemTiledCopyB, ElementB>(); +``` + +CUTLASS's library and profiler use these reflective interfaces to +obtain the kernel's configuration parameters. Users can use these to approximate the CUTLASS 2.x types +for 3.0 API kernels. However, the reflective interfaces cannot always match the types exactly, +as the mappings are not always bijective. + +# Copyright + +Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +SPDX-License-Identifier: BSD-3-Clause + +``` + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+``` diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/cutlass_3x_design.md b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/cutlass_3x_design.md new file mode 100644 index 0000000000000000000000000000000000000000..9db3359d26a7beec5ebf234c7162d9301eed1de8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/cutlass_3x_design.md @@ -0,0 +1,117 @@ +[README](/README.md#documentation) > **CUTLASS 3.0 Design and Hierarchy** + +# CUTLASS 3.0 Design + +CUTLASS 3.0 is a major enhancement over the abstractions of CUTLASS 2.x +and aims to make usage of all layers of the GEMM hierarchy easier and more composable +while still achieving peak performance on hardware. + +## CUTLASS 3.0 design goals + +CUTLASS 3.0 has the following design goals, in no particular order. + +- Simplify expressing and manipulating data and thread layouts across + the GEMM hierarchy with CuTe layouts and layout algebra. + +- Improve code readability and learning curve by + reducing the number of named types. + +- Functional correctness by default, + actionable static asserts otherwise. + +- Single, clear points of performance tuning and custom kernel extensions. + +- Support for NVIDIA Hopper GPUs with great performance using + features such as Tensor Cores, tensor memory accelerator, and thread block clusters. + +## A new Conceptual GEMM Hierarchy + +CUTLASS 2.x decomposes the moving parts of a GEMM operation +across a hierarchy that closely mirrors the organization of GPU +architectures. This is discussed in detail within the +[CUTLASS 2.x GEMM API documentation](/media/docs/gemm_api.md). +This design, however, sometimes results in a coupling that is too tight +to extend to newer GPU features that might not fit into the same architectural +hierarchy. For instance, Hopper's warp-group-wide instructions do not naturally +fit into any warp or thread layer GEMM concept in CUTLASS 2.x. Even for Volta tensor cores, +instructions that atomically exist at the quad-pair granularity are first tiled at +the warp level before use. This hints at the brittleness of the abstraction. + +CUTLASS 3.0 detaches its interface layers from the hardware, +centering them instead around the natural structure of GEMM algorithms +not tied to any particular GPU generation. +This makes CUTLASS's code more robust to GPU architecture evolution, +less prone to implementation detail leakage, and provides users +with a consistent interface to hardware acceleration regardless of +the architecture-specific details. + +The new conceptual GEMM hierarchy is discussed in detail in the dedicated +[CUTLASS 3.0 GEMM API documentation readme](/media/docs/gemm_api_3x.md), +along with code examples of the core concepts and types. + +## Adoption of CuTe Layout and Tensors + +CUTLASS 3.0 introduces a new core library, CuTe, to describe and manipulate tensors of threads and data. +CuTe is a collection of C++ CUDA template abstractions for defining and operating on hierarchically multidimensional layouts of threads and data. CuTe provides `Layout` and `Tensor` objects that compactly package the type, shape, memory space, and layout of data, while performing the complicated indexing for the user. + +CUTLASS 3.0 adopts CuTe throughout the GEMM hierarchy in its templates, greatly simplifying the design +and improving code composability and readability. More documentation specific to CuTe can be found in its [dedicated documentation directory](/media/docs/cute/00_quickstart.md).
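+As a small, self-contained taste of these two vocabulary types (the shape and strides below are arbitrary values chosen for the sketch, not anything CUTLASS prescribes):
+
+```c++
+#include <cute/tensor.hpp>
+using namespace cute;
+
+// An 8x16 column-major layout: shape (8,16) with strides (1,8).
+auto layout = make_layout(make_shape(Int<8>{}, Int<16>{}),
+                          make_stride(Int<1>{}, Int<8>{}));
+
+// A tensor pairs an iterator (here, a global-memory pointer) with a layout.
+float* ptr = /* pointer to at least 8*16 floats in global memory */ nullptr;
+Tensor t = make_tensor(make_gmem_ptr(ptr), layout);
+
+// t(i,j) performs the index arithmetic i*1 + j*8 for the user:
+// float x = t(3,5);   // element at offset 3 + 5*8 = 43
+```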
+ +![CuTe helps reduce named iterator types down to a single vocabulary type, `Layout`](/media/images/cutlass-reduction-in-named-iterators.png) + +Programming massively parallel systems with various layers of logical thread and data hierarchies is not a trivial task. + +- `cute::Layout`s always maintain logical consistency of their coordinates, + allowing us to check pre- and post-conditions at compile time for all static inner loops. +- Explicit thread to data mapping allows users and kernel authors to inspect and reason about operations + from a single point in the source code. +- Layouts provide a single point of performance tuning, as most optimizations can be done by careful + selection of thread and data layouts. +- Formalized algebra makes manipulation of and reasoning about thread->data mapping explicit in source code. +- Single vocabulary type (`cute::Layout`) subsumes every iterator and layout in CUTLASS 2.x. CUTLASS 2.x uses many bespoke thread maps, iterators, and data layouts. Iterators are fundamentally 1-D, whereas most layouts we encounter in the GPU hierarchy are fundamentally n-D. + +## Reducing the number of named types and iterator concepts + +The CUTLASS 2.x design preferred introducing bespoke named types for each +architecture-specific thread and data layout. For instance, the `gemm::threadblock` namespace +contains implementations for `MmaMultistage`, `MmaPlanarComplexMultistage`, `MmaPipelined`, etc., +even though all of them provide mainloops for GEMMs. To spell these types the same way in generic code, +CUTLASS 2.x provides aliases through its `default_x_configuration.h` files. However, +these aliases make the code much harder to read, as the user has to perform type substitution +mentally in order to understand the codebase. + +CUTLASS 3.0 greatly reduces the number of named types used throughout by + +- Replacing all iterator concepts for all memory domains with `cute::Tensor`s +- Dispatching mainloop and epilogue implementations on tag-dispatch policies rather than naming new types +- Dispatching kernel layer schedules on tag-dispatch policies rather than naming new types + +Reducing the number of named types has many benefits: + +- It *makes writing generic code easier*, as the primary type names share the same spelling + without aliasing through configuration providers. +- It *flattens the learning curve of CUTLASS* by greatly reducing the mental context required + as the library only exposes a handful of named types. +- It *provides a clear, singular extension point* for users to plug in their customizations + through the dispatch policies. + +## Correctness by default, Performance through clear, individual points of tuning + +CUTLASS 2.x maintained its thread layouts as implicit indexing math implemented +as a part of 1D iterators. This meant that the thread to data layout mapping +was implicit in the imperative structure of the C++ code itself and did not have +a formal algebra we could use to manipulate these mappings. Each iterator +had to re-implement its indexing and mapping logic. This made it hard to learn +how this mapping was performed for existing iterators, and even harder to +implement custom layout functions for the core inner loops of a GEMM. + +CUTLASS 3.0 replaces all iterator concepts from CUTLASS 2.x +with a single layout type for thread and data tensors. +CuTe's formalized layout algebra is then used at every layer of +the GEMM hierarchy to manipulate the mapping between the two.
+CuTe layouts always maintain logical consistency, and for fully static layouts +(such as in the core unrolled inner loops), provide +compile time checks that break builds if this consistency is violated. +In this way, CuTe reifies the thread-to-data-layout mapping, +makes it easier to write code that is "correct by construction". +If the code compiles, it's probably correct. diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/doxygen_mainpage.md b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/doxygen_mainpage.md new file mode 100644 index 0000000000000000000000000000000000000000..414574816432a938ea34bf87d0b6a0af7fb7a73f --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/doxygen_mainpage.md @@ -0,0 +1,69 @@ +# CUTLASS 3.0 + +_CUTLASS 3.0 - January 2023_ + +CUTLASS is a collection of CUDA C++ template abstractions for implementing +high-performance matrix-multiplication (GEMM) at all levels and scales within CUDA. +It incorporates strategies for hierarchical decomposition and data movement similar +to those used to implement cuBLAS. CUTLASS decomposes these "moving parts" into +reusable, modular software components abstracted by C++ template classes. These +components can be specialized +and tuned via custom tiling sizes, data types, and other algorithmic policies. The +resulting flexibility simplifies their use as building blocks within custom kernels +and applications. + +To support a wide variety of applications, CUTLASS provides extensive support for +mixed-precision computations, providing specialized data-movement and +multiply-accumulate abstractions for 8-bit integer, half-precision floating +point (FP16), single-precision floating point (FP32), and double-precision floating +point (FP64) types. Furthermore, CUTLASS exploits the _Tensor Cores_ and asynchronous +memory copy operations of the latest NVIDIA GPU architectures. + +# What's New in CUTLASS 3.0 + +For an overview of CUTLASS 3.0's GEMM interface levels, +please refer to the +[CUTLASS 3.0 GEMM API document](./gemm_api_3x.md). +To learn how to migrate code using CUTLASS 2.x's interface +to CUTLASS 3.0, please refer to the +[backwards compatibility document](./cutlass_3x_backwards_compatibility.md). + +# GEMM examples + +For a code example showing how to define +a GEMM kernel using CUTLASS, please refer to +[the quickstart guide](./quickstart.md). +The [`examples` directory](../../examples) +has a variety of examples. + +# Copyright + +Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +SPDX-License-Identifier: BSD-3-Clause + +``` + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +``` diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/efficient_gemm.md b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/efficient_gemm.md new file mode 100644 index 0000000000000000000000000000000000000000..ddb9043c83972769920ada4390e2a70af3a1edb5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/efficient_gemm.md @@ -0,0 +1,288 @@ +![ALT](/media/images/gemm-hierarchy-with-epilogue-no-labels.png "Efficient GEMM in CUDA") + +[README](/README.md#documentation) > **Efficient GEMM in CUDA** + +# Efficient GEMM in CUDA + +CUTLASS implements the hierarchically blocked structure described in +[CUTLASS: Fast Linear Algebra in CUDA C++](https://devblogs.nvidia.com/cutlass-linear-algebra-cuda/) +and the [CUTLASS GTC2018 talk](http://on-demand.gputechconf.com/gtc/2018/presentation/s8854-cutlass-software-primitives-for-dense-linear-algebra-at-all-levels-and-scales-within-cuda.pdf). + +## Hierarchical Structure + +The basic triple loop nest computing matrix multiply may be blocked and tiled to match +concurrency in hardware, memory locality, and parallel programming models. In CUTLASS, +GEMM is mapped to NVIDIA GPUs with the structure illustrated by the following loop nest. + +```c++ +for (int cta_n = 0; cta_n < GemmN; cta_n += CtaTileN) { // for each threadblock_y } threadblock-level concurrency + for (int cta_m = 0; cta_m < GemmM; cta_m += CtaTileM) { // for each threadblock_x } + + for (int cta_k = 0; cta_k < GemmK; cta_k += CtaTileK) { // "GEMM mainloop" - no unrolling + // - one iteration of this loop is one "stage" + // + for (int warp_n = 0; warp_n < CtaTileN; warp_n += WarpTileN) { // for each warp_y } warp-level parallelism + for (int warp_m = 0; warp_m < CtaTileM; warp_m += WarpTileM) { // for each warp_x } + // + for (int warp_k = 0; warp_k < CtaTileK; warp_k += WarpTileK) { // fully unroll across CtaTileK + // - one iteration of this loop is one "k Group" + // + for (int mma_k = 0; mma_k < WarpTileK; mma_k += MmaK) { // for each mma instruction } instruction-level parallelism + for (int mma_n = 0; mma_n < WarpTileN; mma_n += MmaN) { // for each mma instruction } + for (int mma_m = 0; mma_m < WarpTileM; mma_m += MmaM) { // for each mma instruction } + // + mma_instruction(d, a, b, c); // TensorCore matrix computation + + } // for mma_m + } // for mma_n + } // for mma_k + + } // for warp_k + } // for warp_m + } // for warp_n + + } // for cta_k + } // for cta_m +} // for cta_n +``` + +This tiled loop nest targets concurrency among +- threadblocks, +- warps, and +- CUDA and Tensor Cores. + +It takes advantage of memory locality within +- shared memory and +- registers. 
+ +The figure below illustrates the flow of data within this structure. +This is the hierarchical GEMM computation embodied by CUTLASS. Each stage depicts a +nested level of tiling which corresponds to a layer of concurrency within the CUDA execution model and to a +level within the memory hierarchy, becoming increasingly finer moving left to right. + +![ALT](/media/images/gemm-hierarchy-with-epilogue.png "Hierarchical GEMM in CUDA") + + +### Threadblock-level GEMM + +Each threadblock computes its portion of the output GEMM by iteratively loading tiles of input +matrices and computing an accumulated matrix product. At the threadblock level, data are loaded from +global memory. The blocking strategy in general is key to achieving efficiency. However, the programmer +must balance multiple conflicting goals. A +larger threadblock means fewer fetches from global memory, thereby ensuring that DRAM bandwidth +does not become a bottleneck. +However, large threadblock tiles may not match the dimensions of the problem well. If either the +GEMM _M_ or _N_ dimension is small, some threads within the threadblock may not perform meaningful +work, as the threadblock may be partially outside the bounds of the problem. If both _M_ and _N_ +are small while _K_ is large, this scheme may launch relatively few threadblocks and fail to +make full use of all multiprocessors within the GPU. Strategies to optimize performance for this case, +as described in the section [Parallelized Reductions](efficient_gemm.md#parallelized-reductions), +partition the GEMM K dimension across multiple threadblocks or multiple warps. These threadblocks +or warps compute matrix products in parallel; the products are then reduced to compute the result. + +In CUTLASS, the dimensions of the threadblock tile are specified as `ThreadblockShape::{kM, kN, kK}` +and may be tuned to specialize the GEMM computation for the target processor and dimensions of +the GEMM problem. + + +### Warp-level GEMM + +The warp-level GEMM maps to the warp-level parallelism within the CUDA execution model. Multiple +warps within a threadblock fetch data from shared memory into registers and perform computations. +Warp-level GEMMs may be implemented either by TensorCores issuing +[mma.sync](https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-instructions-mma) +or [wmma](https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-instructions-wmma-mma) +instructions, or by thread-level matrix computations issued to CUDA cores. +For maximum performance, access to shared memory should be bank conflict free. To maximize data +reuse within the warp, a large warp-level GEMM tile should be chosen. + + +### Thread-level GEMM + +At the lowest level of blocking, each thread is responsible for processing a certain number of +elements. Threads cannot access each other's registers, so we choose an organization that enables +reuse of values held in registers for multiple math instructions. This results in a 2D tiled +structure within a thread, in which each thread issues a sequence of independent math instructions +to the CUDA cores and computes an accumulated outer product. + +SGEMM, IGEMM, HGEMM, and DGEMM are computed by SIMT math instructions issued by thread-level matrix multiply +procedures. + + +## Epilogue + +The above code focuses only on the matrix multiply computation **C = AB** whose result is +held in the registers of each thread within the threadblock. 
The mapping of logical elements +in the output tile to each thread is chosen to maximize performance of the matrix multiply +computation but does not result in efficient, coalesced loads and stores to global memory. + +The epilogue is a separate phase in which threads exchange data through shared memory then +cooperatively access global memory using efficient striped access patterns. It is also +the phase in which linear scaling and other elementwise operations may be conveniently +computed using the matrix product results as inputs. + +CUTLASS defines several typical epilogue operations such as linear scaling and clamping, +but other device-side function call operators may be used to perform custom operations. + +## Optimizations + +The hierarchical structure described above yields an efficient mapping to the CUDA execution model and +CUDA/TensorCores in NVIDIA GPUs. The following sections describe strategies for obtaining peak performance +for all corners of the design space, maximizing parallelism and exploiting data locality wherever possible. + +### Pipelining + +The blocked structure demands a large storage allocation within the registers of each CUDA thread. The +accumulator elements typically occupy at least half a thread's total register budget. Consequently, +occupancy -- the number of concurrent threads, warps, and threadblocks -- is relatively low compared +to other classes of GPU workloads. This limits the GPU's ability to hide memory latency and other stalls +by context switching to other concurrent threads within an SM. + +To mitigate the effects of memory latency, CUTLASS uses *software pipelining* to overlap memory accesses +with other computation within a thread. CUTLASS accomplishes this by double buffering at the +following scopes. + +- **Threadblock-scoped shared memory tiles:** two tiles are allocated in shared memory. + One is used to load data for the current matrix operation, + while the other tile is used to buffer data loaded from global memory + for the next mainloop iteration. + +- **Warp-scoped matrix fragments:** two fragments are allocated within registers. + One fragment is passed to CUDA and TensorCores during the current matrix computation, + while the other is used to receive shared memory fetch returns + for the next warp-level matrix operation. + +The following diagram illustrates the efficient, pipelined mainloop body used in CUTLASS GEMMs. + +![ALT](/media/images/software-pipeline.png "Software pipeline in CUTLASS") + +### Threadblock Rasterization + +To maximize reuse of data held in the last level cache, CUTLASS defines several functions to +affect the mapping of threadblocks to logical partitions of the GEMM problem. These map +consecutively launched threadblocks to packed two-dimensional regions of the partitioned GEMM +problem to increase the probability that these will access the same tiles of global memory at +approximately the same time. + +Several functions are defined in [cutlass/gemm/threadblock_swizzle.h](/include/cutlass/gemm/threadblock/threadblock_swizzle.h). + + +### Parallelized Reductions + +**Split K - reduction across threadblocks** + +Matrix product computations expose parallelism among _O(MN)_ independent inner product +computations. For sufficiently large problem sizes, a GEMM kernel in CUTLASS may approach +the theoretical maximum computational throughput. For small problems, however, there are +too few threadblocks to efficiently occupy the entire GPU. 
+ +As a recourse, parallelizing the reduction performed during the inner product computation +enables more threadblocks to execute concurrently while still taking advantage of the throughput +benefits of large threadblock-level GEMM tiles. + +CUTLASS implements parallel reductions across threadblocks by partitioning the GEMM _K_ dimension +and launching an additional set of threadblocks for each partition. Consequently, we refer to +this strategy within CUTLASS as "parallel reduction splitK." The "parallel reduction splitK" strategy +requires the execution of two kernels: partitionedK GEMM and batched reduction. + +PartitionedK GEMM resembles one flavor of batched strided GEMM. Instead of requiring users +to specify the problem size of each batch, partitionedK GEMM asks for the overall problem size and the +number of partitions that will be applied along the K dimension for operands A and B. For example, +parameters of m=128, n=128, k=4096 and partition=16 will result in 16 batched strided GEMMs +with each batch of m=128, n=128, k=256. PartitionedK also allows scenarios where k is not divisible +by the partition count. + +For example, parameters of m=128, n=128, k=4096 and partition=20 +will result in 20 batched strided GEMMs. +The first 19 batches will have m=128, n=128, and k=204 (4096/20, rounded down), +and the last batch will have m=128, n=128, and k=220. + +The batched reduction kernel takes as input the output (C) of partitionedK GEMM, +and performs a reduction along the K-dimension. +Users must manage workspace memory to store this intermediate result. + +**Sliced K - reduction across warps** + +Similar to the split-k scenario, sliced-k aims at improving the efficiency of kernels +with smaller M and N dimensions but a large K dimension. +At the thread-block level, the parameters CtaTileN and CtaTileM expose parallelism +by partitioning the work among warps. +Larger warpTiles expose better instruction-level parallelism (ILP) and reuse, +but also limit the number of warps running per threadblock, which reduces efficiency. + +In such scenarios, partitioning the warpTiles also along ctaTileK +helps use the hardware more efficiently by allowing more warps to run concurrently in a CTA. +Sliced-k kernels break down a threadblock's computation among participating warps +not just along the CtaTileN and CtaTileM dimensions, but also along the CtaTileK dimension. +Thus, sliced-k entails a small cost in the form of a reduction +which has to happen at the end among the participating warps. +This is because each warp computes using only a "slice" of CtaTileK, +so each warp only has a partial sum before the reduction. + +### Warp Specialization + +Starting with Hopper, CUTLASS 3.0 incorporates the concept of [Warp Specialization](https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#spatial-partitioning-also-known-as-warp-specialization) +as part of the kernel design. A thread block is partitioned into two sets of warps, [*producer* warp group](/include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized.hpp) and [*consumer* warp group](/include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized.hpp). The *producer* warp group loads data from global memory into shared memory buffers using the new [Tensor Memory Accelerator (TMA)](https://developer.nvidia.com/blog/nvidia-hopper-architecture-in-depth/).
+ +[*Producer* warp group (DMA)](/include/cutlass/gemm/collective/sm90_mma_tma_gmma_ss_warpspecialized.hpp) waits for the shared memory buffers to be signaled as [empty](/include/cutlass/gemm/collective/sm90_mma_tma_gmma_ss_warpspecialized.hpp) by the *consumer* warp group using the newly added **Async Pipeline class** ([refer](/media/docs/pipeline.md)). Once the data is written into the shared memory, TMA also updates the barrier associated with that stage to notify affected threads that the buffer has been [filled](/include/cutlass/gemm/collective/sm90_mma_tma_gmma_ss_warpspecialized.hpp). The [*Consumer* warp group (MMA)](/include/cutlass/gemm/collective/sm90_mma_tma_gmma_ss_warpspecialized.hpp) on the other hand waits for the *producer* warp group to signal that the buffer is [filled](/include/cutlass/gemm/collective/sm90_mma_tma_gmma_ss_warpspecialized.hpp) and then launches tensor core MMA operations. Finally, the *consumer* warp group [releases](/include/cutlass/gemm/collective/sm90_mma_tma_gmma_ss_warpspecialized.hpp) the buffers for the next set of TMA loads to happen. + +**Warp-Specialized Persistent Cooperative kernel design** + +Another flavor of Warp-Specialized kernel design introduced starting with Hopper is the [*Warp-Specialized Persistent Cooperative*](/include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized_cooperative.hpp) kernel. Like the Warp-Specialized kernel, the concepts of warp groups and barrier synchronization between warp groups remain the same in the cooperative design. +The distinctive features of the Warp-Specialized Persistent Cooperative kernel are the following: +* Persistent thread blocks are launched to occupy as many SMs as specified in the [KernelHardwareInfo](/include/cutlass/kernel_hardware_info.hpp) struct. These persistent thread blocks are used to tile the output and thus (potentially) compute multiple output tiles through their lifetime. The main benefit this adds is amortization of the thread-block launch and kernel prologue overheads, which are typical of all kernels. +* Presence of two *consumer* warp groups cooperating on the same output tile by splitting the tile in half across the M dimension. This allows for larger tile sizes to be enabled - since the register pressure per *consumer* warp group is reduced - and hence improves performance. + +Since each thread block now computes multiple output tiles, the shape of the grid launch and the scheduling of tiles to the thread blocks is managed using the new [*Tile Scheduler*](/include/cutlass/gemm/kernel/sm90_tile_scheduler.hpp). The *Tile Scheduler* considers the shape of the *clusters* as well as the number of available SMs to compute a valid scheduling of the output tiles to launched thread blocks. + +**Warp-Specialized Persistent Ping-Pong kernel design** + +The third kernel design is the [*Warp-Specialized Persistent Ping-Pong*](/include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized_pingpong.hpp) kernel. +Like the Warp-Specialized Persistent Cooperative kernel, the concepts of warp groups, barrier synchronization between warp groups, and the shape of the grid launch remain the same in the persistent ping-pong design. +The distinctive feature of the Warp-Specialized Persistent Ping-Pong kernel is the following: +* The two *consumer* warp groups are assigned a different output tile using the Tile Scheduler.
This allows for *epilogue* of one *consumer* warp group to be overlapped with the math operations of the other *consumer* warp group - thus maximizing tensor core utilization. +* The *producer* warp group synchronizes using the [Ordered Sequence Barrier](/include/cutlass/pipeline.hpp) to fill buffers of the two *consumer* warp groups one after the other in order. + +# Resources + +The following additional resources describe design and implementation details of GEMMs +targeting NVIDIA GPUs. + +- [Developing CUDA Kernels to Push Tensor Cores to the Absolute Limit on NVIDIA A100.](https://www.nvidia.com/en-us/gtc) (SR 21745) +- [CUTLASS: Fast Linear Algebra in CUDA C++](https://devblogs.nvidia.com/cutlass-linear-algebra-cuda/) +- [CUTLASS: SOFTWARE PRIMITIVES FOR DENSE LINEAR ALGEBRA AT ALL LEVELS AND SCALES WITHIN CUDA](https://on-demand-gtc.gputechconf.com/gtcnew/sessionview.php?sessionName=s8854-cutlass%3a+software+primitives+for+dense+linear+algebra+at+all+levels+and+scales+within+cuda) +- [Programming Tensor Cores: NATIVE VOLTA TENSOR CORES WITH CUTLASS](https://developer.download.nvidia.com/video/gputechconf/gtc/2019/presentation/s9593-cutensor-high-performance-tensor-operations-in-cuda-v2.pdf) +- [CUDA Programming Guide: warp matrix functions](https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#wmma) +- [Matrix Multiply Accumulate Instructions](https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-instructions-mma) + +# Copyright + +Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +SPDX-License-Identifier: BSD-3-Clause + +``` + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+``` diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/functionality.md b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/functionality.md new file mode 100644 index 0000000000000000000000000000000000000000..fea258f4ab9efbd61c73e2e48668cca108e97ed4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/functionality.md @@ -0,0 +1,312 @@ +![ALT](/media/images/gemm-hierarchy-with-epilogue-no-labels.png "CUTLASS Functionality") + +[README](/README.md#documentation) > **Functionality** + +# Functionality + +Note : CUTLASS-3 requires users to use CUDA 11.4 or newer, and SM70 or newer, for the target toolkit and architecture, respectively. +Please refer to the [Compatibility](/README.md#Compatibility) section for more details. + +- N - Column Major Matrix +- T - Row Major matrix +- {N,T} x {N,T} - All combinations, i.e., NN, NT, TN, TT +- [NHWC](/include/cutlass/layout/tensor.h#L63-206) - 4 dimension tensor used for convolution +- [NCxHWx](/include/cutlass/layout/tensor.h#L290-395) - Interleaved 4 dimension tensor used for convolution +- f - floating point +- s - signed int +- b - bit +- cf - complex float +- bf16 - bfloat16 +- tf32 - tfloat32 +- Simt - Use Simt CUDA Core MMA +- TensorOp - Use Tensor Core MMA +- SpTensorOp - Use Sparse Tensor Core MMA +- WmmaTensorOp - Use WMMA abstraction to use Tensor Core MMA + +## Device-level GEMM + +The following tables summarize device-level GEMM kernels in CUTLASS, organized by opcode class, data type, and layout. +Hyperlinks to relevant unit tests demonstrate how specific template instances may be defined. + +### CUTLASS 3.x Kernels + +|**Opcode Class** | **Compute Capability** | **CUDA Toolkit** | **Data Type** | **Layouts** | **Unit Test** | +|-----------------|------------------------|------------------|--------------------------------|------------------------|------------------| +| **TensorOp** | 90a | 12.0+ | `f16 * f16 + { f16, f32 } => { f16, f32 }` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized.cu) | +| **TensorOp** | 90a | 12.0+ | `bf16 * bf16 + { f16, f32 } => { bf16, f32 }`| {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/sm90_gemm_bf16_bf16_bf16_tensor_op_f32.cu) | +| **TensorOp** | 90a | 12.0+ | `{f32, tf32} * {f32, tf32} + f32 => f32`| { T } x { N } => {N,T} | [example](/test/unit/gemm/device/sm90_gemm_f32_f32_f32_tensor_op_f32.cu) | +| **TensorOp** | 90a | 12.0+ | `s8 * s8 + s32 => {s32, s8}` | { T } x { N } => {N,T} | [example](/test/unit/gemm/device/sm90_gemm_s8_s8_s8_tensor_op_s32.cu) | + + +### CUTLASS 2.x Kernels + +|**Opcode Class** | **Compute Capability** | **CUDA Toolkit** | **Data Type** | **Layouts** | **Unit Test** | +|-----------------|------------------------|------------------|--------------------------------|------------------------|------------------| +| **Simt** | 50+ | 11.4+ | `f32 * f32 + f32 => f32` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/simt_sgemm_nt_sm50.cu) | +| **Simt** | 50+ | 11.4+ | `f64 * f64 + f64 => f64` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/simt_dgemm_nt_sm50.cu) | +| **Simt** | 60+ | 11.4+ | `f16 * f16 + f16 => f16` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/simt_hgemm_nt_sm50.cu) | +| **Simt** | 61+ | 11.4+ | `s8 * s8 + s32 => {s32,s8}` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/simt_igemm_nt_sm50.cu) | +| **WmmaTensorOp** | 70+ | 11.4+ | `f16 * f16 + f16 => 
f16` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_f16t_f16t_f16n_wmma_tensor_op_f16_sm70.cu) | +| **WmmaTensorOp** | 70+ | 11.4+ | `f16 * f16 + f32 => {f16, f32}`| {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_f16t_f16t_f16n_wmma_tensor_op_f32_sm70.cu) | +| **WmmaTensorOp** | 75+ | 11.4+ | `s8 * s8 + s32 => {s32, s8}` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_s8t_s8n_s8t_wmma_tensor_op_s32_sm72.cu) | +| **WmmaTensorOp** | 75+ | 11.4+ | `s4 * s4 + s32 => {s32, s4}` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_s4t_s4n_s4t_wmma_tensor_op_s32_sm75.cu) | +| **WmmaTensorOp** | 75+ | 11.4+ | `b1 ^ b1 + s32 => {s32, b1}` | { T } x { N } => {N,T} | [example](/test/unit/gemm/device/gemm_b1t_b1n_b1t_wmma_tensor_op_s32_sm75.cu) | +| **TensorOp** | 70+ | 11.4+ | `f16 * f16 + f16 => f16` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_f16n_f16t_f16t_volta_tensor_op_f16_sm70.cu) | +| **TensorOp** | 70+ | 11.4+ | `f16 * f16 + f32 => {f16, f32}`| {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_f16n_f16t_f16t_volta_tensor_op_f32_sm70.cu) | +| **TensorOp** | 75+ | 11.4+ | `f16 * f16 + f16 => f16` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_f16n_f16t_f16t_tensor_op_f16_sm75.cu) | +| **TensorOp** | 75+ | 11.4+ | `f16 * f16 + f32 => {f16, f32}`| {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_f16n_f16t_f16t_tensor_op_f32_sm75.cu) | +| **TensorOp** | 75+ | 11.4+ | `s8 * s8 + s32 => {s32, s8}` | { T } x { N } => {N,T} | [example](/test/unit/gemm/device/gemm_s8t_s8n_s32n_tensor_op_s32_sm75.cu) | +| **TensorOp** | 75+ | 11.4+ | `s4 * s4 + s32 => {s32, s4}` | { T } x { N } => {N,T} | [example](/test/unit/gemm/device/gemm_s4t_s4n_s32n_tensor_op_s32_sm75.cu) | +| **TensorOp** | 75+ | 11.4+ | `b1 ^ b1 + s32 => {s32, b1}` | { T } x { N } => {N,T} | [example](/test/unit/gemm/device/gemm_b1t_b1n_s32n_tensor_op_s32_sm75.cu) | +| **TensorOp** | 80+ | 11.4+ | `f16 * f16 + f16 => f16` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_f16n_f16t_f16t_tensor_op_f16_sm80.cu) | +| **TensorOp** | 80+ | 11.4+ | `f16 * f16 + f32 => {f16, f32}`| {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_f16n_f16t_f16t_tensor_op_f32_sm80.cu) | +| **TensorOp** | 80+ | 11.4+ | `bf16 * bf16 + f32 => {bf16, f32}`| {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_bf16n_bf16t_bf16t_tensor_op_f32_sm80.cu) | +| **TensorOp** | 80+ | 11.4+ | `tf32 * tf32 + f32 => f32`| {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_f32n_f32t_f32t_tensor_op_f32_sm80.cu) | +| **TensorOp** | 80+ | 11.4+ | `s8 * s8 + s32 => {s32, s8}` | { T } x { N } => {N,T} | [example](/test/unit/gemm/device/gemm_s8t_s8n_s32n_tensor_op_s32_sm80.cu) | +| **TensorOp** | 80+ | 11.4+ | `s4 * s4 + s32 => {s32, s4}` | { T } x { N } => {N,T} | [example](/test/unit/gemm/device/gemm_s4t_s4n_s32n_tensor_op_s32_sm80.cu) | +| **TensorOp** | 80+ | 11.4+ | `b1 ^ b1 + s32 => {s32, b1}` | { T } x { N } => {N,T} | [example](/test/unit/gemm/device/gemm_b1t_b1n_s32n_tensor_op_s32_sm80.cu) | +| **TensorOp** | 80+ | 11.4+ | `f64 * f64 + f64 => f64` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_f64n_f64t_f64t_tensor_op_f64_sm80.cu) | +| **TensorOp** | 80+ | 11.4+ | `cf32 * cf32 + cf32 => cf32` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_cf32n_cf32t_cf32t_tensor_op_tf32_f32_sm80.cu) | +| **TensorOp** | 80+ | 11.4+ | `cf64 * cf64 + cf64 => cf64` | {N,T} x {N,T} => {N,T} | 
[example](/test/unit/gemm/device/gemm_cf64n_cf64t_cf64t_tensor_op_f64_sm80.cu), [Gaussian 3m](/test/unit/gemm/device/gemm_cf64n_cf64t_cf64t_tensor_op_f64_gaussian_sm80.cu) | +| **SpTensorOp** | 80+ | 11.4+ | `f16 * f16 + f32 => {f16, f32}` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_f16n_f16n_f32t_tensor_op_f32_sparse_sm80.cu) | +| **SpTensorOp** | 80+ | 11.4+ | `bf16 * bf16 + f32 => {bf16, f32}` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_f16n_f16n_f32t_tensor_op_f32_sparse_sm80.cu) | +| **SpTensorOp** | 80+ | 11.4+ | `tf32 * tf32 + f32 => f32` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_f32n_f32n_f32t_tensor_op_f32_sparse_sm80.cu) | +| **SpTensorOp** | 80+ | 11.4+ | `s8 * s8 + s32 => {s8, s32}` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_s8t_s8n_s32t_tensor_op_s32_sparse_sm80.cu) | +| **SpTensorOp** | 80+ | 11.4+ | `s4 * s4 + s32 => {s4, s32}` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_s4t_s4n_s32t_tensor_op_s32_sparse_sm80.cu) | +| **TensorOp** | 90+ | 11.8+ | `f64 * f64 + f64 => f64` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_f64n_f64t_f64t_tensor_op_f64_sm90.cu) | + + +## Device-level Implicit GEMM convolution + +The following table summarizes device-level implicit GEMM convolution kernels in CUTLASS, organized by opcode class, data type, and layout. +Hyperlinks to relevant conv2d fprop unit tests demonstrate how specific template instances may be defined. +One can find and/or create equivalent dgrad and wgrad convolutional operators. + +|**Opcode Class** | **Compute Capability** | **CUDA Toolkit** | **Data Type** | **Layouts** | **Unit Test** | +|-----------------|------------------------|------------------|--------------------------------|------------------|------------------| +| **Simt** | 50+ | 11.4+ | `f32 * f32 + f32 => f32` | NHWC | [example](/test/unit/conv/device/conv2d_fprop_implicit_gemm_f32nhwc_f32nhwc_f32nhwc_simt_f32_sm50.cu) | +| **Simt** | 50+ | 11.4+ | `cf32 * cf32 + cf32 => cf32` | NHWC | [example](/test/unit/conv/device/conv2d_fprop_implicit_gemm_cf32nhwc_cf32nhwc_cf32nhwc_simt_f32_sm50.cu) | +| **TensorOp** | 70+ | 11.4+ | `f16 * f16 + f32 => {f16, f32}`| NHWC | [example](/test/unit/conv/device/conv2d_fprop_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm70.cu) | +| **TensorOp** | 75+ | 11.4+ | `f16 * f16 + f32 => {f16, f32}`| NHWC | [example](/test/unit/conv/device/conv2d_fprop_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm75.cu) | +| **TensorOp** | 75+ | 11.4+ | `s8 * s8 + s32 => {s32, s8}` | NHWC, NCxHWx | [example](/test/unit/conv/device/conv2d_fprop_implicit_gemm_s8nhwc_s8nhwc_s32nhwc_tensor_op_s32_sm75.cu), [ncxhwx](/test/unit/conv/device/conv2d_fprop_implicit_gemm_s8ncxhwx_s8cxrskx_s8ncxhwx_tensor_op_s32_sm75.cu) | +| **TensorOp** | 75+ | 11.4+ | `s4 * s4 + s32 => {s32, s4}` | NHWC, NCxHWx | [example](/test/unit/conv/device/conv2d_fprop_implicit_gemm_s4nhwc_s4nhwc_s32nhwc_tensor_op_s32_sm75.cu), [ncxhwx](/test/unit/conv/device/conv2d_fprop_implicit_gemm_s4ncxhwx_s4cxrskx_s4ncxhwx_tensor_op_s32_sm75.cu) | +| **Simt** | 80+ | 11.4+ | `f32 * f32 + f32 => f32` | NHWC | [example](/test/unit/conv/device/conv2d_fprop_implicit_gemm_f32nhwc_f32nhwc_f32nhwc_simt_f32_sm80.cu) | +| **Simt** | 80+ | 11.4+ | `cf32 * cf32 + cf32 => cf32` | NHWC | [example](/test/unit/conv/device/conv2d_fprop_implicit_gemm_cf32nhwc_cf32nhwc_cf32nhwc_simt_f32_sm80.cu) | +| **TensorOp** | 80+ | 11.4+ | `f16 * f16 + f32 => {f16, f32}`| NHWC | 
[example](/test/unit/conv/device/conv2d_fprop_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm80.cu) | +| **TensorOp** | 80+ | 11.4+ | `f16 * f16 + f16 => f16` | NHWC | [example](/test/unit/conv/device/conv2d_fprop_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm80.cu) | +| **TensorOp** | 80+ | 11.4+ | `tf32 * tf32 + f32 => f32` | NHWC | [example](/test/unit/conv/device/conv2d_fprop_implicit_gemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32_sm80.cu) | +| **TensorOp** | 80+ | 11.4+ | `s8 * s8 + s32 => {s32, s8}` | NHWC, NCxHWx | [example](/test/unit/conv/device/conv2d_fprop_implicit_gemm_s8nhwc_s8nhwc_s32nhwc_tensor_op_s32_sm80.cu), [ncxhwx](/test/unit/conv/device/conv2d_fprop_implicit_gemm_s8ncxhwx_s8cxrskx_s8ncxhwx_tensor_op_s32_sm80.cu) | +| **TensorOp** | 80+ | 11.4+ | `s4 * s4 + s32 => {s32, s4}` | NHWC, NCxHWx | [example](/test/unit/conv/device/conv2d_fprop_implicit_gemm_s4nhwc_s4nhwc_s32nhwc_tensor_op_s32_sm80.cu), [ncxhwx](/test/unit/conv/device/conv2d_fprop_implicit_gemm_s4ncxhwx_s4cxrskx_s4ncxhwx_tensor_op_s32_sm80.cu) | + + + +## Warp-level Matrix Multiply with Tensor Cores + +The following table summarizes supported warp level shapes for each TensorOp instruction. + +|**Opcode Class** | **Instruction Shape** | **Warp Shapes** | +|-----------------|-----------------------|--------------------------------------------| +| **TensorOp** | 8-by-8-by-4 | 32x32x4, 32x64x4, 64x32x4, 64x64x4 | +| **TensorOp** | 16-by-8-by-8 | 32x32x8, 32x64x8, 64x32x8, 64x64x8 | +| **TensorOp** | 16-by-8-by-16 | 32x32x16, 32x64x16, 64x32x16, 64x64x16 | +| **TensorOp** | 8-by-8-by-16 | 32x32x16, 32x64x16, 64x32x16, 64x64x16 | +| **TensorOp** | 8-by-8-by-32 | 32x32x32, 32x64x32, 64x32x32, 64x64x32 | +| **TensorOp** | 16-by-8-by-32 | 32x32x32, 32x64x32, 64x32x32, 64x64x32 | +| **TensorOp** | 16-by-8-by-64 | 32x32x64, 32x64x64, 64x32x64, 64x64x64 | +| **TensorOp** | 8-by-8-by-128 | 32x32x128, 32x64x128, 64x32x128, 64x64x128 | +| **TensorOp** | 16-by-8-by-256 | 32x32x256, 32x64x256, 64x32x256, 64x64x256 | +| **SpTensorOp** | 16-by-8-by-16 | 64x64x16, 64x32x16, 32x64x16, 32x32x16 | +| **SpTensorOp** | 16-by-8-by-32 | 64x64x32, 64x32x32, 32x64x32, 32x32x32 | +| **SpTensorOp** | 16-by-8-by-64 | 64x64x64, 64x32x64, 32x64x64, 32x32x64 | +| **SpTensorOp** | 16-by-8-by-128 | 64x64x128, 64x32x128, 32x64x128, 32x32x128 | + + +TensorOp instructions depend on a permuted shared memory layout that can be efficiently +loaded from. The following tables summarize the destination shared memory layout that +can be targeted by matrix operands. It is assumed that each thread loads 128b vectors +from global memory with layout specified in the column "GMEM Layout." 
+ +**TensorOp 8-by-8-by-4.** + +|**Operand**|**Element** | **GMEM Layout** | **SMEM Layout** | +|-----------|--------------|-----------------|-----------------------------------------| +| **A** | `half_t` | `ColumnMajor` | `ColumnMajorVoltaTensorOpCongruous<16>` | +| **A** | `half_t` | `RowMajor` | `RowMajorVoltaTensorOpCrosswise<16>` | +| **B** | `half_t` | `ColumnMajor` | `ColumnMajorVoltaTensorOpCrosswise<16>` | +| **B** | `half_t` | `RowMajor` | `RowMajorVoltaTensorOpCongruous<16>` | +| **C** | `half_t` | `RowMajor` | `RowMajor` | +| **C** | `float` | `RowMajor` | `RowMajor` | + +**TensorOp 16-by-8-by-8.** + +|**Operand**|**Element** | **GMEM Layout** | **SMEM Layout** | +|-----------|--------------|-----------------|------------------------------------| +| **A** | `half_t` | `ColumnMajor` | `ColumnMajorTensorOpCongruous<16>` | +| **A** | `half_t` | `RowMajor` | `RowMajorTensorOpCrosswise<16>` | +| **B** | `half_t` | `ColumnMajor` | `ColumnMajorTensorOpCrosswise<16>` | +| **B** | `half_t` | `RowMajor` | `RowMajorTensorOpCongruous<16>` | +| **C** | `half_t` | `RowMajor` | `RowMajor` | +| **C** | `float` | `RowMajor` | `RowMajor` | + +**TensorOp 16-by-8-by-8.** + +|**Operand**|**Element** | **GMEM Layout** | **SMEM Layout** | +|-----------|--------------|-----------------|------------------------------------| +| **A** | `tfloat32_t` | `ColumnMajor` | `ColumnMajorTensorOpCongruous<32>` | +| **A** | `tfloat32_t` | `RowMajor` | `RowMajorTensorOpCrosswise<32>` | +| **B** | `tfloat32_t` | `ColumnMajor` | `ColumnMajorTensorOpCrosswise<32>` | +| **B** | `tfloat32_t` | `RowMajor` | `RowMajorTensorOpCongruous<32>` | +| **C** | `float` | `RowMajor` | `RowMajor` | + + +**TensorOp 16-by-8-by-16.** + +|**Operand**|**Element** | **GMEM Layout** | **SMEM Layout** | +|-----------|--------------|-----------------|------------------------------------| +| **A** | `half_t`, `bfloat16_t` | `ColumnMajor` | `ColumnMajorTensorOpCongruous<16>` | +| **A** | `half_t`, `bfloat16_t` | `RowMajor` | `RowMajorTensorOpCrosswise<16>` | +| **B** | `half_t`, `bfloat16_t` | `ColumnMajor` | `ColumnMajorTensorOpCrosswise<16>` | +| **B** | `half_t`, `bfloat16_t` | `RowMajor` | `RowMajorTensorOpCongruous<16>` | +| **C** | `half_t` | `RowMajor` | `RowMajor` | +| **C** | `float` | `RowMajor` | `RowMajor` | + +**TensorOp 8-by-8-by-4.** + +|**Operand**|**Element** | **GMEM Layout** | **SMEM Layout** | +|-----------|--------------|-----------------|------------------------------------| +| **A** | `double` | `ColumnMajor` | `ColumnMajorTensorOpCongruous<64>` | +| **A** | `double` | `RowMajor` | `RowMajorTensorOpCrosswise<64>` | +| **B** | `double` | `ColumnMajor` | `ColumnMajorTensorOpCrosswise<64>` | +| **B** | `double` | `RowMajor` | `RowMajorTensorOpCongruous<64>` | +| **C** | `double` | `RowMajor` | `RowMajor` | + +**TensorOp 8-by-8-by-16.** + +|**Operand**|**Element** | **GMEM Layout** | **SMEM Layout** | +|-----------|--------------|-----------------|------------------------------------| +| **A** | `int8_t` | `RowMajor` | `RowMajorTensorOpCrosswise<8>` | +| **B** | `int8_t` | `ColumnMajor` | `ColumnMajorTensorOpCongruous<8>` | +| **C** | `int32_t` | `RowMajor` | `RowMajor` | + +**TensorOp 16-by-8-by-32.** + +|**Operand**|**Element** | **GMEM Layout** | **SMEM Layout** | +|-----------|--------------|-----------------|------------------------------------| +| **A** | `int8_t` | `RowMajor` | `RowMajorTensorOpCrosswise<8>` | +| **B** | `int8_t` | `ColumnMajor` | `ColumnMajorTensorOpCongruous<8>` | +| **C** | `int32_t` | `RowMajor` | 
`RowMajor` | + +**TensorOp 8-by-8-by-32.** + +|**Operand**|**Element** | **GMEM Layout** | **SMEM Layout** | +|-----------|--------------|-----------------|------------------------------------| +| **A** | `int4b_t` | `RowMajor` | `RowMajorTensorOpCrosswise<4>` | +| **B** | `int4b_t` | `ColumnMajor` | `ColumnMajorTensorOpCongruous<4>` | +| **C** | `int32_t` | `RowMajor` | `RowMajor` | + +**TensorOp 16-by-8-by-64.** + +|**Operand**|**Element** | **GMEM Layout** | **SMEM Layout** | +|-----------|--------------|-----------------|------------------------------------| +| **A** | `int4b_t` | `RowMajor` | `RowMajorTensorOpCrosswise<4>` | +| **B** | `int4b_t` | `ColumnMajor` | `ColumnMajorTensorOpCongruous<4>` | +| **C** | `int32_t` | `RowMajor` | `RowMajor` | + +**TensorOp 8-by-8-by-128.** + +|**Operand**|**Element** | **GMEM Layout** | **SMEM Layout** | +|-----------|--------------|-----------------|------------------------------------| +| **A** | `bin1_t` | `RowMajor` | `RowMajorTensorOpCrosswise<4>` | +| **B** | `bin1_t` | `ColumnMajor` | `ColumnMajorTensorOpCongruous<4>` | +| **C** | `int32_t` | `RowMajor` | `RowMajor` | + + +**SpTensorOp 16-by-8-by-16.** + +|**Operand**|**Element** | **GMEM Layout** | **SMEM Layout** | +|-----------|--------------|-----------------|------------------------------------| +| **A** | `tfloat32_t` | `RowMajor` | `RowMajorTensorOpCrosswise<32, 32>` | +| **B** | `tfloat32_t` | `ColumnMajor` | `ColumnMajorTensorOpCrosswise<32, 32>`| +| **C** | `float` | `RowMajor` | `RowMajor` | + +**SpTensorOp 16-by-8-by-32.** + +|**Operand**|**Element** | **GMEM Layout** | **SMEM Layout** | +|-----------|--------------|-----------------|---------------------------------------| +| **A** | `half_t` | `RowMajor` | `RowMajorTensorOpCrosswise<16, 64>` | +| **B** | `half_t` | `ColumnMajor` | `ColumnMajorTensorOpCrosswise<16, 64>`| +| **C** | `float` | `RowMajor` | `RowMajor` | + +**SpTensorOp 16-by-8-by-64.** + +|**Operand**|**Element** | **GMEM Layout** | **SMEM Layout** | +|-----------|--------------|-----------------|---------------------------------------| +| **A** | `int8_t` | `RowMajor` | `RowMajorTensorOpCrosswise<8, 128>` | +| **B** | `int8_t` | `ColumnMajor` | `ColumnMajorTensorOpCrosswise<8, 128>`| +| **C** | `int32_t` | `RowMajor` | `RowMajor` | + +**SpTensorOp 16-by-8-by-128.** + +|**Operand**|**Element** | **GMEM Layout** | **SMEM Layout** | +|-----------|--------------|-----------------|------------------------------------| +| **A** | `int4b_t` | `RowMajor` | `RowMajorTensorOpCrosswise<4, 256>` | +| **B** | `int4b_t` | `ColumnMajor` | `ColumnMajorTensorOpCrosswise<4, 256>`| +| **C** | `int32_t` | `RowMajor` | `RowMajor` | + + + +## Warp-level Matrix Multiply with CUDA WMMA API + +The following table summarizes supported warp level shapes for each WmmaTensorOp instruction. + +|**Opcode Class** | **Instruction Shape** | **Warp Shapes** | +|---------------------|-----------------------|--------------------------------------------| +| **WmmaTensorOp** | 16-by-16-by-16 | 32x32x16, 32x64x16, 64x32x16 | +| **WmmaTensorOp** | 8-by-32-by-16 | 32x32x16, 32x64x16, 64x32x16 | +| **WmmaTensorOp** | 32-by-8-by-16 | 32x32x16, 32x64x16, 64x32x16 | +| **WmmaTensorOp** | 8-by-8-by-32 | 32x32x32, 32x64x32, 64x32x32, 64x64x32 | +| **WmmaTensorOp** | 8-by-8-by-128 | 32x32x128, 32x64x128, 64x32x128, 64x64x128 | + + +CUDA exposes warp-level matrix operations in the CUDA C++ WMMA API. The CUDA C++ WMMA API exposes Tensor Cores via a set of functions and types in the `nvcuda::wmma` namespace. 
The functions and types in `nvcuda::wmma` provide target-independent APIs and implement architecture-specific tensor operation using TensorOp instruction underneath. CUTLASS exposes WMMA API through WmmaTensorOp. The WmmaTensorOp supports canonical shared memory layouts. The following table summarizes the destination shared memory layout that can be targeted by matrix operands. The WMMA API expects that matrices in shared memory loaded by `nvcuda::wmma::load_matrix_sync()` satisfy 128 bit alignment. + + +**WmmaTensorOp (all matrix sizes and data types).** + +|**Operand** | **GMEM Layout** | **SMEM Layout** | +|------------|----------------------------|------------------------------| +| **A** | `RowMajor`, `ColumnMajor` | `RowMajor`, `ColumnMajor` | +| **B** | `RowMajor`, `ColumnMajor` | `RowMajor`, `ColumnMajor` | +| **C** | `RowMajor`, `ColumnMajor` | `RowMajor`, `ColumnMajor` | + +# Copyright + +Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +SPDX-License-Identifier: BSD-3-Clause + +``` + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +``` diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/fundamental_types.md b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/fundamental_types.md new file mode 100644 index 0000000000000000000000000000000000000000..dc9a5736d6a6dafc606419a02493ddc421c74d3e --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/fundamental_types.md @@ -0,0 +1,377 @@ +![ALT](/media/images/gemm-hierarchy-with-epilogue-no-labels.png "CUTLASS") + +[README](/README.md#documentation) > **Fundamental Types** + +# Fundamental Types + +CUTLASS defies several fundamental numeric and container classes upon which computations and +algorithms algorithms for linear algebra computations are implemented. + +Where possible, CUTLASS fundamental types mirror the C++ Standard Library. However, there are circumstances that necessitate divergence from the Standard Library's specification. 
In such cases, the CUTLASS implementation adopts unique capitalization to distinguish that standard vocabulary types may not be safely substituted in all cases. + +Most types in CUTLASS are usable in both host code and device code. Moreover, they are functional regardless of compute capability, but they may only be efficient when hardware support is present. + +## Numeric Types + +CUTLASS defines classes for the following numeric data types. + +* `half_t`: IEEE half-precision floating point (exponent: 5b, mantissa: 10b; literal suffix `_hf`) +* `bfloat16_t`: BFloat16 data type (exponent: 8b, mantissa: 7b; literal suffix `_bf16`) +* `tfloat32_t`: Tensor Float 32 data type (exponent: 8b, mantissa: 10b; literal suffix `_tf32`) +* `int4_t`, `uint4_t`: 4b signed and unsigned integer (literal suffx `_s4`, `_u4`) +* `bin1_t`: 1b binary numeric type (literal suffix `_b1`) +* `complex`: defines complex-valued data type based on the supplied real-valued numeric type + +Numeric types in CUTLASS may be used in both host and device code and are intended to function +like any other plain-old-data type. + +If CUTLASS is compiled with `CUTLASS_F16C_ENABLED`, then hardware conversion is used for +half-precision types in host code. Regardless, `cutlass::half_t` uses the most efficient +NVIDIA GPU hardware instructions available in device code. + +Example: +```c++ +#include +#include + +__global__ void kernel(cutlass::half_t x) { + printf("Device: %f\n", float(x * 2.0_hf)); +} + +int main() { + + cutlass::half_t x = 0.5_hf; + + std::cin >> x; + + std::cout << "Host: " << 2.0_hf * x << std::endl; + + kernel<<< dim3(1,1), dim3(1,1,1) >>>(x); + + return 0; +} +``` + +## Containers + +CUTLASS uses the following containers extensively for implementing efficient CUDA kernels. + +### Array + +```c++ +template < + typename T, // element type + int N // number of elements +> +class Array; +``` + +`Array` defines a statically sized array of elements of type _T_ and size _N_. This class is similar to +[`std::array<>`](https://en.cppreference.com/w/cpp/container/array) in the Standard Library with two notable exceptions: +* constructors for each element may not be called +* partial specializations exist to pack or unpack elements smaller than one byte. + +`Array<>` is intended to be a convenient and uniform container class to store arrays of numeric elements regardless of data type or vector length. The storage needed is expected to be the minimum necessary given the logical size of each numeric type in bits (numeric types smaller than one byte are densely packed). Nevertheless, the size reported by `sizeof(Array)` is always an integer multiple of bytes. + +Storing numeric elements in a C++ STL-style container class enables useful modern C++ mechanisms such as range-based for loops. For example, to print the elements of `Array<>`, the following range-based for loop syntax is always valid regardless of numeric data type, compute capability, or context in host or device code. + +Example: +```c++ +int const kN; +Array elements; + +CUTLASS_PRAGMA_UNROLL // required to ensure array remains in registers +for (auto x : elements) { + printf("%d, %f", int64_t(x), double(x)); // explictly convert to int64_t or double +} +``` + +When copying `Array<>` objects or passing them as arguments to methods, it is best to avoid accessing individual elements. This enables the use of vector instructions to perform the operation more efficiently. For example, setting all elements to zero is best performed by calling the `clear()` method. 
Copies should be performed by assigning the entire object. + +Example: +```c++ +#include + +int const kN; +Array source; +Array destination; + +source.clear(); // set all elements to value of zero + +destination = source; // copy to `destination` +``` + +`Array<>` may be used to store elements smaller than one byte such as 4b integers. +```c++ +Array packed_integers; + +static_assert( + sizeof(packed_integers) == 1, + "Packed storage of sub-byte data types is compact."); + +// Access array elements using usual indirection and assignment operators +packed_integers[0] = 2_s4; +packed_integers[1] = 3_s4; + +CUTLASS_PRAGMA_UNROLL +for (auto x : elements) { + printf("%d", int(x)); // access elements normally +} + +``` + +### AlignedArray + +```c++ +template < + typename T, // element type + int N, // number of elements + int Alignment // alignment requirement in bytes +> +class AlignedArray; +``` + +`AlignedArray` is derived from `Array` and supports an optional alignment field. Pointers to objects of type `AlignedArray<>` reliably yield vectorized memory accesses when dereferenced. + +Example: +```c++ +int const kN = 8; +ArrayAligned source; +ArrayAligned const *ptr = ...; + +source = *ptr; // 128b aligned memory access +``` + +### AlignedBuffer + +```c++ +template < + typename T, // element type + int N, // number of elements + int Alignment // alignment requirement in bytes +> +class AlignedBuffer; +``` + +`AlignedBuffer` provides a uniform way to define aligned memory allocations for all data types. This is particularly +useful in defining allocations within shared memory with guaranteed memory alignment needed for vectorized access. +Note, constructors of the elements within AlignedBuffer<> are not called, and so the elements are initially in an +undefined state. + +Use `AlignedBuffer<>::data()` to obtain a pointer to the first element of the buffer. + +**Example:** Guaranteed aligned shared memory allocation. Note, shared memory contents are uninitialized. +```c++ +int const kN = 32; +int const kAlignment = 16; // alignment in bytes + +// Define a shared memory allocation in device code +__shared__ AlignedBuffer, kN, kAlignment> matrix_tile; + +complex *ptr = matrix_tile.data(); // ptr is guaranteed to have 128b (16 Byte) alignment +``` + +Note, `AlignedBuffer<>` only guarantees that its internal memory allocation is aligned, obtained by `AlignedBuffer<>::data()`. There is no guarantee that the `AlignedBuffer<>` object itself satisfies alignment constraints or that its internal memory allocation is contiguous. Device code performing vectorized memory accesses should use the `AlignedArray<>` type. + +**_Example_:** Vectorized memory access to shared memory allocations. +```c++ +int const kN = 1024; + +__shared__ AlignedBuffer smem_buffer; + +AlignedArray *ptr = reinterpret_cast *>(smem_buffer.data()); + +AlignedArray x = ptr[threadIdx.x]; // 128b shared memory load +``` + +### Numeric Conversion + +CUTLASS defines procedures for performing numeric conversion between data types in `cutlass/numeric_conversion.h`. +Where possible, these target hardware acceleration on the target architecture and support multiple rounding modes. 
+ +```c++ +#include "cutlass/numeric_conversion.h" +#include "cutlass/numeric_types.h" + +NumericConverter convert_f32_to_f16; +NumericConverter convert_f32_to_tf32; + +half_t x = convert_f32_to_f16(3.14159f); +tfloat32_t y = convert_f32_to_tf32(3.14159f); +``` + +Recent GPU architectures such as NVIDIA Turing and Ampere combine numeric conversion with efficient packing +into bit vectors. Consequently, CUTLASS defines conversion on both scalars and `Array<>` objects to implement +the optimal code sequence on all architectures. + +```c++ +// +// Example: convert and pack 32b signed integers to a vector of packed signed 8-bit integers. +// +int const kN = 16; +Array destination; +Array source; + +NumericConverter convert; + +destination = convert(source); +``` + +### Coord + +```c++ +template < + int Rank, + typename Index = int +> +class Coord; +``` + +`Coord` is a container used explicitly for defining logical coordinates in tensors of known rank. Traditional vector operators are defined such as `+`, `-`, and scalar multiplication `*` to simplify the creation of vector-valued expressions on tensor coordinates. + +**Example:** Vector operations on coordinates. +```c++ +Coord<2> compute_offset(Coord<2> const & base) { + + Coord<2> stride = make_Coord(1, kM); + + return base + stride * make_Coord(threadIdx.x, threadIdx.y); +} +``` + +Instances of `Coord<>` are used throughout CUTLASS to compute indices into tensors. Frequently, the dimensions of tensors of known layouts may be given names such as "rows" or "columns". To clarify the code, we have implemented several classes derived from `Coord<>` with accessors for each coordinate member. + +Such classes include: +```c++ +struct MatrixCoord : public Coord<2> { + Index & row(); + Index & column(); +}; +``` +and + +```c++ +struct Tensor4DCoord : public Coord<4> { + Index & n(); + Index & h(); + Index & w(); + Index & c(); +}; +``` + +### PredicateVector + +`PredicateVector` contains a statically sized array of hardware predicates packed into registers to enable efficient access within unrolled loops. + +This container is optimized for sequential access through iterators, though these are only efficient when used within fully unrolled loops. + +Moreover, instances of `PredicateVector<>` are not guaranteed to be updated until any non-const iterator objects have gone out of scope. This is because iterators are effectively caches that update the `PredicateVector<>` instance's internal storage as a batch. + +**Example:** Managing an array of predicates. +```c++ + +unsigned mask; +PredicateVector predicates; + +// Nested scope to update predicates via an iterator +{ + auto pred_it = predicates.begin(); + + CUTLASS_PRAGMA_UNROLL + for (int bit = 0; bit < kBits; ++bit, ++pred_it) { + bool guard = (mask & (1u << bit)); + pred_it.set(guard); + } +} + +// Efficient use of predicates to guard memory instructions +T *ptr; +Array fragment; + +auto pred_it = predicates.const_begin(); +for (int access = 0; access < kAccesses; ++access, ++pred_it) { + if (*pred_it) { + fragment[access] = ptr[access]; + } +} + +``` + +Note: `PredicateVector<>` is not efficient when accessed via dynamic random access. If an array of bits is needed with dynamic random access (in contrast with access via _constexpr_ indices), then `Array` should be used instead. + +## Functional + +CUTLASS defines function objects corresponding to basic arithmetic operations modeled after C++ Standard Library's `` header. 
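+
+For illustration, a minimal sketch of applying one of these function objects elementwise to `Array<>` operands might look as follows. This assumes the `plus<>` functor and its `Array<>` partial specialization; the variable names and vector length are illustrative only.
+
+```c++
+#include "cutlass/array.h"
+#include "cutlass/functional.h"
+#include "cutlass/numeric_types.h"
+
+static int const kN = 8;
+
+cutlass::Array<cutlass::half_t, kN> a;
+cutlass::Array<cutlass::half_t, kN> b;
+cutlass::Array<cutlass::half_t, kN> c;
+
+// Elementwise sum; the Array<> specialization may map to packed SIMD
+// instructions on architectures that support them.
+cutlass::plus<cutlass::Array<cutlass::half_t, kN>> add_op;
+
+c = add_op(a, b);
+```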
+ +CUTLASS extends this by defining `multiply_add` which computes `d = a * b + c`. The partial specialization `multiply_add>` computes complex-valued multiplication and addition using four real-valued multiply-add operations; these may correspond to native hardware instructions. + +Example: +```c++ +complex a; +complex b; +complex c; +complex d; + +multiply_add> mad_op; + +d = mad_op(a, b, c); // four single-precision multiply-add instructions +``` + +CUTLASS defines partial specializations for type `Array`, performing elementwise operations on each element. A further partial specialization for `Array` targets may target native SIMD instructions for compute capability SM60 and beyond. + +**Example:** Fused multiply-add of arrays of half-precision elements. +```c++ +static int const kN = 8; + +Array a; +Array b; +Array c; +Array d; + +multiply_add> mad_op; + +d = mad_op(a, b, c); // efficient multiply-add for Array of half-precision elements +``` + +## Numeric Conversion + +Operators are define to convert between numeric types in `numeric_conversion.h`. Conversion operators are defined in +terms of individual numeric elements and on arrays which enable the possibility of efficient hardware +support on current and future NVIDIA GPUs. + +**Example:** Converting between 32-b and 8-b integers. +```c++ + +``` + +# Copyright + +Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +SPDX-License-Identifier: BSD-3-Clause + +``` + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+``` diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/gemm_api.md b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/gemm_api.md new file mode 100644 index 0000000000000000000000000000000000000000..a11a3bbc261d50fec361fd9fbd6962371504c2c3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/gemm_api.md @@ -0,0 +1,573 @@ +![ALT](/media/images/gemm-hierarchy-with-epilogue-no-labels.png "CUTLASS GEMM API") + +[README](/README.md#documentation) > **CUTLASS GEMM API** + +# CUTLASS GEMM API + +CUTLASS presents a uniform programming model for matrix multiply-accumulate operations at each level of the hierarchy. This document +focuses on device-level, threadblock-level GEMMs, warp-level GEMMs, thread-level GEMMs, and instruction-level GEMMs. + +# CUTLASS GEMM Model + +CUTLASS implements the basic GEMM triple loop nest with a tiled structure mirroring the execution model hierarchy. + +The following pseudocode describes the model for a GEMM kernel targeting a warp-synchronous matrix multiply instruction like +mma.sync. The entire operation is referred to as "Gemm," as it is assumed that an epilogue operation performs the general matrix +update similar to BLAS. + +```c++ + // cutlass::gemm::device::Gemm + // +for (int cta_n = 0; cta_n < GemmN; cta_n += CtaTileN) { // for each CTA } CTA-level concurrency + for (int cta_m = 0; cta_m < GemmM; cta_m += CtaTileM) { // for each CTA } + // + // cutlass::gemm::threadblock::Mma + // + for (int cta_k = 0; cta_k < GemmK; cta_k += CtaTileK) { // "GEMM mainloop" - no unrolling - one iteration of this loop is one "stage" + // + for (int warp_n = 0; warp_n < CtaTileN; warp_n += WarpTileN) { // for each warp } warp-level concurrency + for (int warp_m = 0; warp_m < CtaTileM; warp_m += WarpTileM) { // for each warp } + // + for (int warp_k = 0; warp_k < CtaTileK; warp_k += WarpTileK) { // fully unroll across CtaTileK - one iteration of this loop is one "k Group" + // + for (int mma_k = 0; mma_k < WarpTileK; mma_k += MmaK) { // cutlass::gemm::warp::Mma + for (int mma_n = 0; mma_n < WarpTileN; mma_n += MmaN) { // + for (int mma_m = 0; mma_m < WarpTileM; mma_m += MmaM) { // + // + mma_instruction(d, a, b, c); // cutlass::arch::mma - warp-wide matrix multiply instruction + + } // for mma_m + } // for mma_n + } // for mma_k + + } // for warp_k + } // for warp_m + } // for warp_n + + } // for cta_k + } // for cta_m +} // for cta_n + +``` + +The outer-most loops correspond to CTA-level hardware concurrency and are not explicitly written as loops in the code. These +are implied by CUDA grid launch semantics. + +The comment `cutlass::gemm::threadblock::Mma` refers to the threadblock-scoped matrix multiply-accumulate concept. This is +the computation performed by one threadblock to compute a matrix product in registers. The "GEMM main loop" is listed. + +The comment `cutlass::gemm::warp::Mma` refers to the computation performed by each warp. This is a nested loop executing a +sequence of accumulated outer products. + +The inner-most operation corresponds directly to hardware support. In this example, the nested structure terminates with +warp-synchronous matrix multiply instructions targeting Tensor Cores. +Alternatively, GEMMs targeting single-thread instructions may have an additional series of nested loops corresponding to +thread-level concurrency. 
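+
+For reference, the following purely sequential C++ sketch makes the same blocking concrete. The tile sizes are illustrative, the problem sizes are assumed to be exact multiples of the tile sizes, and none of the names below are CUTLASS types; the sketch only shows how the accumulation decomposes across the tiling levels.
+
+```c++
+#include <vector>
+
+// Sequential reference of the blocked loop nest above. Matrices are row-major:
+// A is M x K, B is K x N, and C is M x N. C must be sized M*N and zero-initialized.
+void blocked_gemm_reference(int M, int N, int K,
+                            std::vector<float> const &A,
+                            std::vector<float> const &B,
+                            std::vector<float> &C) {
+
+  int const CtaTileM = 128, CtaTileN = 128, CtaTileK = 32;    // "threadblock" tile
+  int const WarpTileM = 64, WarpTileN = 64, WarpTileK = 32;   // "warp" tile
+  int const MmaM = 16, MmaN = 8, MmaK = 8;                    // "instruction" tile
+
+  for (int cta_n = 0; cta_n < N; cta_n += CtaTileN)             // CTA-level loops
+  for (int cta_m = 0; cta_m < M; cta_m += CtaTileM)             // (implied by the grid launch)
+  for (int cta_k = 0; cta_k < K; cta_k += CtaTileK)             // GEMM mainloop
+  for (int warp_n = 0; warp_n < CtaTileN; warp_n += WarpTileN)  // warp-level loops
+  for (int warp_m = 0; warp_m < CtaTileM; warp_m += WarpTileM)
+  for (int warp_k = 0; warp_k < CtaTileK; warp_k += WarpTileK)
+  for (int mma_k = 0; mma_k < WarpTileK; mma_k += MmaK)         // per-instruction tiles
+  for (int mma_n = 0; mma_n < WarpTileN; mma_n += MmaN)
+  for (int mma_m = 0; mma_m < WarpTileM; mma_m += MmaM)
+    // The innermost "instruction" is expanded here as scalar multiply-adds.
+    for (int k = 0; k < MmaK; ++k)
+    for (int n = 0; n < MmaN; ++n)
+    for (int m = 0; m < MmaM; ++m) {
+      int row = cta_m + warp_m + mma_m + m;
+      int col = cta_n + warp_n + mma_n + n;
+      int kk  = cta_k + warp_k + mma_k + k;
+      C[row * N + col] += A[row * K + kk] * B[kk * N + col];
+    }
+}
+```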
+ +# CUTLASS GEMM Components + +This loop nest is expressed in CUTLASS via the following components which are specialized for data type, layout, and +math instruction. + +![ALT](/media/images/cutlass-gemm-components.png "CUTLASS GEMM Components") + +These components are described in the following sections. + +## Device-wide GEMM API + +The device-level GEMM API is intended to streamline instantiation and execution of the standard +GEMM computation across the GPU. This operator is intended to be used in host-side .cu code and +has semantics similar to cuBLAS. + +The device-wide GEMM API is embodied by the following operators: +- [cutlass::gemm::device::Gemm](/include/cutlass/gemm/device/gemm.h) - basic GEMM operation +- [cutlass::gemm::device::GemmArray](/include/cutlass/gemm/device/gemm_array.h) - batched GEMM operation in which input matrices are read from arrays of pointers +- [cutlass::gemm::device::GemmBatched](/include/cutlass/gemm/device/gemm_batched.h) - batched GEMM operation in which input matrices are separated by a constant stride +- [cutlass::gemm::device::GemmSplitKParallel](/include/cutlass/gemm/device/gemm_splitk_parallel.h) - GEMM operation that partitions the GEMM K dimension then launches a separate reduction kernel + +**Example:** launch a mixed-precision GEMM targeting Volta Tensor Cores. +```c++ + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, // ElementA + cutlass::layout::ColumnMajor, // LayoutA + cutlass::half_t, // ElementB + cutlass::layout::ColumnMajor, // LayoutB + cutlass::half_t, // ElementOutput + cutlass::layout::ColumnMajor, // LayoutOutput + float, // ElementAccumulator + cutlass::arch::OpClassTensorOp, // tag indicating Tensor Cores + cutlass::arch::Sm70 // tag indicating target GPU compute architecture + >; + + Gemm gemm_op; + cutlass::Status status; + + // + // Launch GEMM on the device + // + + status = gemm_op({ + {m, n, k}, + {ptrA, lda}, + {ptrB, ldb}, + {ptrC, ldc}, + {ptrD, ldd}, + {alpha, beta} + }); + + if (status != cutlass::Status::kSuccess) { + return -1; + } +``` + + +## Threadblock-level GEMM API + +GEMMs at this scope are expected to efficiently load tiles of data from global memory into internal storage and then compute matrix +products with warp-level GEMM operators. + +The threadblock-scoped matrix multiply operation is embodied by +[cutlass::gemm::threadblock::MmaPipelined](/include/cutlass/gemm/threadblock/mma_pipelined.h). +This is a class inspired by [std::transform_reduce()](https://en.cppreference.com/w/cpp/algorithm/transform_reduce) +which computes the accumulated matrix product of a range of tiles defined by tile iterators. + +![ALT](/media/images/cutlass-threadblock-mma-pipelined.png "cutlass::gemm::threadblock::MmaPipelined") + +In the case of GEMM, the tile iterators are +[cutlass::transform::threadblock::PredicatedTileIterator](/include/cutlass/transform/threadblock/predicated_tile_iterator.h) +to traverse a sequence of tiles in global memory with appropriate predication to avoid out-of-bounds +memory accesses. + +*Concept.* Threadblock-level matrix multiply accumulate operators are function objects satisfying the following concept. 
+```c++ +struct Mma { + /// Shape of warp-level matrix operation (concept: GemmShape) + struct Shape; + + /// Data type of multiplicand A (concept: numeric type) + struct ElementA; + + /// Layout of multiplicand A (concept: Layout) + struct LayoutA; + + /// Data type of multiplicand B (concept: numeric type) + struct ElementB; + + /// Layout of multiplicand B (concept: Layout) + struct LayoutB; + + /// Data type of accumulator matrix C (concept: numeric type) + struct ElementC; + + /// Layout of accumulator matrix C (concept: Layout) + struct LayoutC; + + /// Iterator of A operand in shared memory - satisfies: ReadableRandomAccessTileIteratorConcept + struct IteratorA; + + /// Fragment object loaded from IteratorA (concept: Array) + struct FragmentA; + + /// Iterator of B operand in shared memory - satisfies: ReadableRandomAccessTileIteratorConcept + struct IteratorB; + + /// Fragment object loaded from IteratorB (concept: Array) + struct FragmentB; + + /// Iterator of C operand in shared memory - + /// satisfies: ReadableRandomAccessTileIteratorConcept | WriteableRandomAccessTileIteratorConcept + struct IteratorC; + + /// Fragment object loaded from IteratorC (concept: Array) + struct FragmentC; + + /// Warp-level matrix multiply operator (concept: satisfies gemm::warp::Mma) + struct Operator; + + // + // Method + // + + /// Computes a matrix product accumulated in D + CUTLASS_DEVICE + void operator()( + FragmentC &D, + IteratorA iter_A, + IteratorB iter_B, + FragmentC const &C); +}; +``` + +## Warp-level Matrix Multiply API + +Warp-level GEMM operators load tiles from shared memory into registers and then compute matrix multiplies using either +Tensor Cores or CUDA Cores. The result is accumulated in a register tile. Iterators are defined for each +operand `A`, `B`, and `C`. + +The warp-level GEMM API is a generalization of CUDA's WMMA API to achieve the following objectives: + +- native matrix multiply sizes of Tensor Cores +- permuted shared memory layouts to ensure conflict-free accesses +- pointer initilization outside of the mainloop +- efficient traversal + +Defining a warp-level matrix multiply in CUTLASS is similar to WMMA as shown below. + +![ALT](/media/images/cutlass-warp-level-gemm-api-instantiation.png "CUTLASS vs WMMA API") + +The usage model is also similar. The following example computes a warp-level GEMM operation, +accumulating a series of matrix products in a register-backed array. The input to a warp-level +GEMM operation in CUTLASS _must_ be data in shared memory loaded by iterators or on +register-backed fragments. 
+ +![ALT](/media/images/cutlass-warp-level-gemm-operation.png "CUTLASS warp-level GEMM API") + +```c++ +#include "cutlass/gemm/warp/default_mma_tensor_op.h" + +using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 64>; + +using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< + cutlass::sizeof_bits::value, 64>; + +using WarpMma = typename cutlass::gemm::warp::DefaultMmaTensorOp< + cutlass::gemm::GemmShape<64, 64, 8>, // Overall warp-level GEMM operation + cutlass::gemm::GemmShape<16, 8, 8>, // Target instruction + cutlass::half_t, LayoutA, // operand A type and layout + cutlass::half_t, LayoutB, // operand B type and layout + float, // accumulator type + cutlass::layout::RowMajor>::Type; // accumulator layout + +// +// Define a GEMM operation loading data from shared memory +// +int const kGemmK = 32; + +__shared__ ElementA smem_buffer_A[WarpMma::Shape::kM * kGemmK]; +__shared__ ElementB smem_buffer_B[WarpMma::Shape::kN * kGemmK]; + +// +// Construct iterators into SMEM tiles +// + +// leading dimensions inferred from matrix problem size +int lda = WarpMma::Shape::kM; +int ldb = WarpMma::Shape::kN; + +// iterators into shared memory +WarpMma::IteratorA warp_iterator_A({smem_buffer_A, lda}); +WarpMma::IteratorB warp_iterator_B({smem_buffer_B, ldb}); + +// Fragments in registers storing the operands +FragmentA frag_A; +FragmentB frag_B; +FragmentC accum; + +WarpMma mma; + +accum.clear(); + +// +// Accumulated outer product +// + +#pragma unroll 1 +for (int k = 0; k < kGemmK; k += WarpMma::Shape::kK) { + + + iter_A.load(frag_A); // Load fragments from A and B matrices + iter_B.load(frag_B); + + ++iter_A; ++iter_B; // Advance along GEMM K to next tile in A + // and B matrices + + // Compute matrix product + mma(accum, frag_A, frag_B, accum); +} +``` + +*Concept.* Warp-level Mma operations are function objects satisfying the following concept. 
+ +```c++ +struct Mma { + /// Shape of warp-level matrix operation (concept: GemmShape) + struct Shape; + + /// Data type of multiplicand A (concept: numeric type) + struct ElementA; + + /// Layout of multiplicand A (concept: Layout) + struct LayoutA; + + /// Data type of multiplicand B (concept: numeric type) + struct ElementB; + + /// Layout of multiplicand B (concept: Layout) + struct LayoutB; + + /// Data type of accumulator matrix C (concept: numeric type) + struct ElementC; + + /// Layout of accumulator matrix C (concept: Layout) + struct LayoutC; + + /// Iterator of A operand in shared memory - satisfies: ReadableRandomAccessTileIteratorConcept + struct IteratorA; + + /// Fragment object loaded from IteratorA (concept: Array) + struct FragmentA; + + /// Iterator of B operand in shared memory - satisfies: ReadableRandomAccessTileIteratorConcept + struct IteratorB; + + /// Fragment object loaded from IteratorB (concept: Array) + struct FragmentB; + + /// Iterator of C operand in shared memory - + /// satisfies: ReadableRandomAccessTileIteratorConcept | WriteableRandomAccessTileIteratorConcept + struct IteratorC; + + /// Fragment object loaded from IteratorC (concept: Array) + struct FragmentC; + + /// Indicates class of matrix operator (arch::OpClassSimt or arch::OpClassTensorOp) + struct OperatorClass; + + // + // Methods + // + + /// Computes a matrix multiply-accumulate + CUTLASS_DEVICE + void operator()( + FragmentC &D, + IteratorA A, + IteratorB B, + FragmentC const &C); +}; +``` + + + +*Tensor Core Operators.* Warp-level matrix multiply operators targeting Tensor Cores +may be defined with the following template arguments. The `Policy` type specifies implementation-level details which may +be used to affect performance or internal implementation of the warp-level operator. + +```c++ +namespace cutlass { +namespace gemm { +namespace warp { + +/// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions. +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + /// Data type of A elements + typename ElementA_, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA_, + /// Data type of B elements + typename ElementB_, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB_, + /// Element type of C matrix + typename ElementC_, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC_, + /// Shape of the warp in units of thread (concept: MmaSimtPolicy) + typename Policy_, + /// Used for partial specialization + typename Enable = bool +> +class MmaTensorOp {} + +} // namespace warp +} // namespace gemm +} // namespace cutlass + +``` + +*SIMT Math Instructions.* Warp-level matrix multiply operators targeting CUDA Cores +may be defined with the following template arguments. The `Policy` type specifies implementation-level details which may +be used to affect performance or internal implementation of the warp-level operator. + +```c++ +/// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions. 
+template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + /// Data type of A elements + typename ElementA_, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA_, + /// Data type of B elements + typename ElementB_, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB_, + /// Element type of C matrix + typename ElementC_, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC_, + /// Shape of the warp in units of thread (concept: MmaSimtPolicy) + typename Policy_, + /// Used for partial specialization + typename Enable = bool +> +class MmaSimt; +``` + + +## Thread-level GEMM API + +Thread-level GEMM operations perform matrix multiply-accumulate on data held in registers. These target CUDA Cores exclusively. + +*Concept.* Thread-level matrix multiply operations are function objects satisfying the following concept. +```c++ +struct Mma { + + /// Shape of warp-level matrix operation (concept: GemmShape) + struct Shape; + + /// Data type of multiplicand A (concept: numeric type) + struct ElementA; + + /// Layout of multiplicand A (concept: Layout) + struct LayoutA; + + /// Fragment object loaded from IteratorA (concept: Array) + struct FragmentA; + + /// Data type of multiplicand B (concept: numeric type) + struct ElementB; + + /// Layout of multiplicand B (concept: Layout) + struct LayoutB; + + /// Fragment object loaded from IteratorA (concept: Array) + struct FragmentB; + + /// Data type of accumulator matrix C (concept: numeric type) + struct ElementC; + + /// Layout of accumulator matrix C (concept: Layout) + struct LayoutC; + + /// Fragment object loaded from IteratorA (concept: Array) + struct FragmentC; + + // + // Methods + // + + /// Computes a matrix multiply-accumulate + CUTLASS_DEVICE + void operator()( + FragmentC &D, + FragmentA const &A, + FragmentB const &B, + FragmentC const &C); +}; +``` + +The CUTLASS thread-level GEMM template accepts the following template arguments. +```c++ +namespace cutlass { +namespace gemm { +namespace thread { + +/// Structure to compute the matrix product +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape, + /// Data type of A elements + typename ElementA, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA, + /// Data type of B elements + typename ElementB, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB, + /// Element type of C matrix + typename ElementC, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC, + /// Concept: arch::OpMultiplyAdd or arch::Mma<> + typename Operator = arch::OpMultiplyAdd, + /// Used for partial specialization + typename Enable = bool +> +struct Mma; + +} // namespace thread +} // namespace gemm +} // namespace cutlass +``` + +## Efficient Epilogue + +CUTLASS GEMM operators perform mma followed by epilogue operation similar +to cuBLAS. CUTLASS implements an efficient row-major epilogue. Thus, to achieve +column-major GEMM, operands A & B are transposed and swapped. 
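+
+The identity behind this operand swap can be checked with a small, self-contained sketch (plain C++, not CUTLASS code; sizes and values are arbitrary). A column-major matrix and the row-major storage of its transpose occupy memory identically, so computing row-major `Transpose(B) x Transpose(A)` writes exactly the same bytes as column-major `A x B`.
+
+```c++
+#include <cassert>
+#include <cstdio>
+
+int main() {
+  int const M = 2, N = 2, K = 3;
+  float A[M][K] = {{1, 2, 3}, {4, 5, 6}};     // row-major A (M x K)
+  float B[K][N] = {{1, 0}, {0, 1}, {1, 1}};   // row-major B (K x N)
+
+  // Column-major D = A x B, stored so that D_col[n][m] == D(m, n).
+  float D_col[N][M] = {};
+  for (int n = 0; n < N; ++n)
+    for (int m = 0; m < M; ++m)
+      for (int k = 0; k < K; ++k)
+        D_col[n][m] += A[m][k] * B[k][n];
+
+  // Row-major D' = Transpose(B) x Transpose(A), so that D_t[n][m] == D'(n, m).
+  float D_t[N][M] = {};
+  for (int n = 0; n < N; ++n)
+    for (int m = 0; m < M; ++m)
+      for (int k = 0; k < K; ++k)
+        D_t[n][m] += B[k][n] * A[m][k];
+
+  // The two buffers match element for element.
+  for (int n = 0; n < N; ++n)
+    for (int m = 0; m < M; ++m)
+      assert(D_col[n][m] == D_t[n][m]);
+
+  std::printf("column-major A x B and row-major B^T x A^T produce identical storage\n");
+  return 0;
+}
+```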
+ +To enable efficient row-major epilogue for both row-major and column-major output layout, +CUTLASS' device-level GEMM operators `cutlass::device::Gemm` and `cutlass::device::GemmUniversal` +provide two template definitions: +- (a) [General definition](/include/cutlass/gemm/device/gemm.h#L217) +- (b) [Specialized definition for column-major source/output](/include/cutlass/gemm/device/gemm.h#L545) + +Efficient row-major epilogue for: +- (i) GEMM operator on row-major source/output uses template (a). It runs row-major GEMM and +an efficient row-major epilogue. +- (ii) GEMM operator on column-major source/output uses template (b). It transposes and swaps +operands A and B to enable efficient epilogue. `A x B = C => Transpose(B) x Transpose(A) = Transpose(C)`. +For column-major source (C) matrix, Transpose(C) is row-major, and efficient epilogue works on +row-major. + +Note that cuBLAS typically expects a column-major source (C) and output matrix (D). Thus, +CUTLASS library only instantiates and generates GEMM operatos with column-major layout. However, +CUTLASS by itself can run both row-major and column-major output layouts for all combinations +of input layouts. Thus, CUTLASS supports the following layout combinations for input and output layouts: + +- `{N,T} x {N,T} => {N,T}` - NN, TN, TN, TT GEMM for both row-major and column-major output + +## Instruction-level operations + +CUTLASS defines a template-based interface to Tensor Core operations to avoid resorting +to inline PTX. + +- [mma_sm70.h](/include/cutlass/arch/mma_sm70.h) - Volta TensorCore operations +- [mma_sm75.h](/include/cutlass/arch/mma_sm75.h) - Turing TensorCore operations + + +# Copyright + +Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +SPDX-License-Identifier: BSD-3-Clause + +``` + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+``` diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/gemm_api_3x.md b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/gemm_api_3x.md new file mode 100644 index 0000000000000000000000000000000000000000..8197d2e7218501ce52dd1f95a20d0bf1c9e38362 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/gemm_api_3x.md @@ -0,0 +1,702 @@ +![ALT](/media/images/gemm-hierarchy-with-epilogue-no-labels.png "CUTLASS GEMM API") + +[README](/README.md#documentation) > **CUTLASS 3.0 GEMM API** + +# CUTLASS 3.0 GEMM API + +CUTLASS presents a uniform programming model +for matrix multiply-accumulate (MMA) operations +at different levels of the GPU system hierarchy. +CUTLASS 3.0 has GEMM APIs corresponding to the following levels +in order of highest to the lowest level. + +1. Device +2. Kernel +3. Collective +4. Tiled MMA and Copy +5. Atom + +This document will cover the first three levels in detail: +Device, Kernel, and Collective. +It also briefly discusses the Tiled MMA/Copy and Atom level, +and then refers readers to CuTe's tutorial for more information. + +# CUTLASS GEMM Model + +CUTLASS implements algorithms that express +the classical "triply nested loop" GEMM algorithm +with a tiled structure mirroring the above hierarchy. + +The following pseudocode describes the model for a GEMM kernel +targeting a warp-synchronous matrix multiply instruction like `mma.sync.` +The entire operation is referred to as "Gemm," +as it is assumed that an epilogue operation +performs the general matrix update similar to BLAS. +This is pseudocode and is only meant to illustrate which parts of the layers +correspond to the inner or outer loops of the GEMM. + +```c++ +// cutlass::gemm::kernel::GemmUniversal: ClusterTileM and ClusterTileN loops +// are either rasterized by the hardware or scheduled by the kernel in persistent kernels. +// Parallelism over thread block clusters +for (int cluster_m = 0; cluster_m < GemmM; cluster_m += ClusterTileM) { + for (int cluster_n = 0; cluster_n < GemmN; cluster_n += ClusterTileN) { + + // cutlass::gemm::collective::CollectiveMma: mainloop that iterates over all k-tiles + // No loop unrolling is performed at this stage + for (int k_tile = 0; k_tile < size<2>(gmem_tensor_A); k_tile++) { + + // loops inside cute::gemm(tiled_mma, a, b, c); Dispatch 5: (V,M,K) x (V,N,K) => (V,M,N) + // TiledMma uses the hardware instruction provided through its Mma_Atom + // TiledMma's atom layout, value layout, and permutations define the iteration order + for (int tiled_mma_k = 0; tiled_mma_k < size<2>(A); tiled_mma_k++) { + for (int tiled_mma_m = 0; tiled_mma_m < size<1>(A); tiled_mma_m++) { + for (int tiled_mma_n = 0; tiled_mma_n < size<1>(B); tiled_mma_n++) { + + // TiledMma's vector mode dispatches to the underlying instruction. + mma.call(d, a, b, c); + } // tiled_mma_n + } // tiled_mma_m + } // tiled_mma_k + } // k_tile mainloop + } // cluster_m +} // cluster_n +``` + +The first three nested `for` loops +correspond to parallelism over thread block clusters. +The code does not actually express them as explicit `for` loops. +Instead, the parallelization scheme over tiles +is implied by CUDA grid launch semantics. +However, for persistent kernels, +these three loops are expressed in the source code +as a single `while` loop that queries the +[work tile scheduler](/include/cutlass/gemm/kernel/sm90_tile_scheduler.hpp) +for problem tiles on which to compute. 
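+
+In rough terms, the persistent form replaces the two cluster loops with a sweep over a
+work tile scheduler. The skeleton below is illustrative only; the scheduler type and its
+members are placeholders rather than the actual CUTLASS interface.
+
+```c++
+// Illustrative persistent-kernel skeleton (not the CUTLASS implementation):
+// each CTA sweeps a linearized output-tile index with a grid-sized stride.
+struct WorkTileInfo { int m_idx; int n_idx; bool is_valid; };
+
+struct LinearTileScheduler {
+  int tiles_m, tiles_n;   // number of output tiles in M and N
+  int current;            // linearized tile index; initialized to blockIdx.x by the caller
+
+  __device__ WorkTileInfo get_current_work() const {
+    bool valid = current < tiles_m * tiles_n;
+    return { valid ? current / tiles_n : 0, valid ? current % tiles_n : 0, valid };
+  }
+  __device__ void advance_to_next_work() { current += gridDim.x; }
+};
+
+__device__ void persistent_tile_loop(LinearTileScheduler scheduler) {
+  // Replaces the cluster_m / cluster_n loops: keep asking the scheduler for
+  // output tiles until none remain.
+  for (WorkTileInfo work = scheduler.get_current_work();
+       work.is_valid;
+       scheduler.advance_to_next_work(), work = scheduler.get_current_work()) {
+    // Run the collective mainloop (the k_tile loop) and the epilogue for the
+    // output tile at (work.m_idx, work.n_idx).
+  }
+}
+```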
+ +Inside the three nested `for` loops, +one finds code that pulls matrix tiles +from global memory into more "local" memory +(like shared memory or registers) +and computes MMAs. +These tiled copy and tiled mma iterations are generally +fully static and get fully unrolled. + +# CUTLASS GEMM Components + +CUTLASS expresses the above loop nest +with the following components which are specialized for +data type, layout, and math instruction. + +| API level | API Class and/or function names | +| --- | --- | +| Device | `cutlass::gemm::device::GemmUniversalAdapter` | +| Kernel | `cutlass::gemm::kernel::GemmUniversal` | +| Collective | `cutlass::gemm::collective::CollectiveMma`
`cutlass::epilogue::collective::DefaultEpilogue`
`cutlass::epilogue::collective::Epilogue`
| +| Tiled (MMA and Copy) | `cute::TiledMma` and `cute::TiledCopy`
`cute::gemm()` and `cute::copy()` | +| Atom | `cute::Mma_Atom` and `cute::Copy_Atom` | + +In CUTLASS 3.0, we assemble kernels +by first composing a collective mainloop and collective epilogue +together at the kernel layer, +and then wrapping them with a host-side adapter +to form a GEMM handle to that kernel. + +The following sections describe these components +in the order a user should instantiate them +in order to assemble a kernel. This order is + +1. assemble the required collective mainloop and epilogues, + +2. compose them together to build a kernel type, and + +3. wrap up the kernel with a device layer adapter. + +This order is also reflected in the [CUTLASS 3.0 Hopper kernel examples](/examples/48_hopper_warp_specialized_gemm) as seen in the excerpt below. + +```c++ +// Step 1: Generate the required collective layer mainloop specialization +using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + ArchTag, OperatorClass, + ElementA, LayoutA, AlignmentA, + ElementB, LayoutB, AlignmentB, + ElementAccumulator, + TilesShape, ClusterShape, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + +// Step 2: Specify the collective layer epilogue type +using CollectiveEpilogue = cutlass::epilogue::collective::DefaultEpilogue< + cutlass::gemm::TagToStrideC_t, + cutlass::gemm::TagToStrideC_t, + cutlass::epilogue::thread::LinearCombination>; + +// Step 3: Compose the mainloop and epilogue together at the kernel layer +using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + cute::Shape, // ProblemShape [M,N,K,L] + CollectiveMainloop, + CollectiveEpilogue +>; + +// Step 4: Wrap up the kernel::GemmUniversal kernel class +// with the device adapter to obtain a host-side handle to the kernel +using GemmHandle = cutlass::gemm::device::GemmUniversalAdapter; +``` + +Towards the end, we also briefly cover CuTe's tiled mma and copy as well as the atom layer APIs, +before redirecting users to CuTe-specific documentation for further details. + +## Collective API + +A Collective is "the largest collection of threads +onto which mma atoms and copy atoms are tiled." +That is, it is the largest number of threads in a grid +that can cooperate by leveraging hardware features +for accelerated communication and synchronization. +These hardware features include + +* asynchronous array copy + (e.g., from global memory to shared memory); + +* MMA instructions + for small tiles that live in shared memory; + +* synchronization operations for clusters, + thread blocks, and/or warps; and/or + +* hardware acceleration (such as barriers) + for ensuring that data dependencies + between asynchronous operations are met. + +A Collective uses the `TiledMma` and `TiledCopy` API (see below) +to access operations that copy and perform MMA on tiles. + +Different units of parallelism +(e.g., threads, warps, or thread blocks) +in a Collective might have different roles. +For example, in "warp-specialized" algorithms, +some warps may be responsible for copying data, +while others may be responsible for computation. +Nevertheless, the different units of parallelism +still need to share data and coordinate access +to the shared data. For example, +the producer warps in a warp-specialized algorithm +that copy input matrix tiles into shared memory +need to let the consumer MMA warp(s) know +that their MMA inputs are ready. +We contrast this with the `kernel::` layer API, +which schedules the collectives over *independent* tiles in the grid. 
+ +The Collective API includes both the "mainloop" +of matrix multiply-accumulate, and the epilogue. +This API is the composition point for optimizations +such as mainloop fusions and epilogue fusions. +It is responsible for implementing +the `k_tile` loop in the above triply nested loop pseudocode. + +### Collective Mainloops + +The `cutlass::gemm::collective::CollectiveMma` class +is the primary interface to the collective +matrix multiply-accumulate (MMA) mainloops. +"Mainloop" refers to the "main loop" over tiles -- +the "cluster tile k" loop in the pseudocode +near the top of this document. +Any looping over multiple tiles that +the algorithm might need to do would happen here. + +The `CollectiveMma` class is declared in the header +[cutlass/gemm/collective/collective_mma.hpp](/include/cutlass/gemm/collective/collective_mma.hpp). + +```c++ +namespace cutlass::gemm::collective { + +template < + class DispatchPolicy, + class TileShape, + class ElementA, + class StrideA, + class ElementB, + class StrideB, + class TiledMma, + class GmemTiledCopyA, + class SmemLayoutAtomA, + class SmemCopyAtomA, + class TransformA, + class GmemTiledCopyB, + class SmemLayoutAtomB, + class SmemCopyAtomB, + class TransformB +> +struct CollectiveMma { + static_assert(sizeof(ElementA) == 0, "Could not find a mainloop specialization."); +}; + +} // namespace cutlass::gemm::collective +``` + +- `DispatchPolicy` is the most important type for a collective, and is +[covered in more detail below](#collective-dispatch-policies). + +- `StrideA` and `StrideB` are instances of type `cute::Stride` that represent the global memory layout of A and B tensors. These strides are required to be rank-3, representing the modes `[outer, inner, batch]`. Each of the 3 ranks can be a multi-modal hierarchical stride; this would apply if implementing a tensor contraction. + +- `TiledMma` is an instance of `cute::TiledMma`. + +- `GmemTiledCopyA` and `GmemTiledCopyB` are instances of `cute::TiledCopy` types. Both tiled operation types are [covered in more detail below](#tiled-mma-and-copy). + +- `SmemLayoutAtomA` and `SmemLayoutAtomB` are instances of type `cute::Layout` and represent the smallest +layout that will get tiled over the entire collective's shared memory. This layout does _not_ include the +pipeline mode, and therefore, both are expected to be rank 2 layouts of shape [`outer`, `inner`]. + +- `SmemCopyAtomA` and `SmemCopyAtomB` are `Copy_Atom`s to be used for moving data from shared memory +into register memory. + +Notice that CUTLASS 3.0 mainloops do not accept a dedicated accumulator element type. +We obtain the accumulator type from the `typename TiledMma::ValTypeC`. Note also that +top level API's `ElementA` and `ElementB` can differ from those of the MMA facing +`typename TiledMma::ValTypeA` and `typename TiledMma::ValTypeB`, allowing TMA or user +supplied transform operations to perform type conversions. + +### Collective Dispatch Policies + +`CollectiveMma` implementations are not generic. +Instead, they must be specialized for each algorithm and GPU architecture. +Users can dispatch to a `CollectiveMma` specialization +by picking template arguments matching that specialization. +CUTLASS 3.0 adopts a tag-based dispatch policy type to specialize +mainloop implementations and add tuning knobs to them. 
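+
+The dispatch mechanism itself is ordinary partial specialization: the primary template
+fails with a `static_assert`, and each implementation specializes on a policy tag. The
+following stripped-down sketch (with hypothetical names, not actual CUTLASS types)
+illustrates the idiom.
+
+```c++
+// Stripped-down illustration of tag-based dispatch via partial specialization.
+struct PolicyTwoStage { };   // hypothetical dispatch policy tag
+
+template <class DispatchPolicy, class ElementA, class ElementB>
+struct Mainloop {
+  // Primary template: selected only if no specialization matches the policy.
+  static_assert(sizeof(ElementA) == 0, "No mainloop specialization for this policy.");
+};
+
+// Specialization selected whenever DispatchPolicy is PolicyTwoStage.
+template <class ElementA, class ElementB>
+struct Mainloop<PolicyTwoStage, ElementA, ElementB> {
+  // ... two-stage mainloop implementation ...
+};
+```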
+ +Below is an example of one of the dispatch policies that is used to dispatch to a Hopper TMA +warp-specialized mainloop implementation: + +```c++ +// n-buffer in smem (Hopper TMA), +// pipelined with Hopper GMMA and TMA, +// warp-specialized dynamic schedule +template< + int Stages_, + class ClusterShape_ = Shape<_1,_1,_1>, + class KernelSchedule = KernelTmaWarpSpecializedCooperative +> +struct MainloopSm90TmaGmmaWarpSpecialized { + constexpr static int Stages = Stages_; + using ClusterShape = ClusterShape_; + using ArchTag = arch::Sm90; + using Schedule = KernelSchedule; +}; +``` + +The `Stages_` template parameter lets the user freely vary the number of pipeline stages, +while the `ClusterShape_` type allows for parameterization over the shape of the threadblock +cluster over which TMA multicast will take place. + +The collective dispatch policy is also the primary point of composing various kernel schedules +freely with any mainloop. Each mainloop policy either prescribes a `Schedule` with which +it needs to be run, or exposes a template API that lets the user pick a subset of the following schedules: + +```c++ +struct KernelMultistage { }; +struct KernelTma { }; +struct KernelTmaWarpSpecialized { }; +struct KernelTmaWarpSpecializedPingpong { }; +struct KernelTmaWarpSpecializedCooperative { }; +``` + +- A single kernel schedule can support multiple mainloop implementations. For example, +`KernelMultistage` can be composed with many different mainloop implementations across GPU +architectures such as `MainloopSm70TwoStage`, `MainloopSm80CpAsyncUnpredicated`, `MainloopSm90CpAsyncGmma`, and many more. + +- A single mainloop can be composed with multiple +possible kernel schedules. For example, the `MainloopSm90TmaGmmaWarpSpecialized` can be +composed with any of the `KernelTmaWarpSpecialized`, `KernelTmaWarpSpecializedPingpong` or `KernelTmaWarpSpecializedCooperative` +kernel schedules. + +As [discussed in the CUTLASS 3.0 design documentation](cutlass_3x_design.md), adopting tag +dispatch policies for our core vocabulary types allows us to maintain a single type name for +all operations that conceptually belong to the same class. This design has the following benefits. + +- It *avoids code duplication* in cases where mainloops can be composed with multiple kernels or vice versa. +- It *makes writing generic code easier*, as the primary type name `CollectiveMma` does not change across any implementation. +- It *provides a clear, singular extension point* for users to plug in new, custom mainloops implementations specialized on their own dispatch policies. + +### Collective Builder for `CollectiveMma`s + +The primary `CollectiveMma` is intended to be an expert user interface that allows full control over +all the properties of the collective's GPU micro-kernel. However, often a user just wants an +off-the-shelf GEMM mainloop implementation parameterized on simple configuration parameters. CUTLASS 3.0 +provides [`cutlass::gemm::collective::CollectiveBuilder`](/include/cutlass/gemm/collective/collective_builder.hpp) for such scenarios. 
+ +```c++ +namespace cutlass::gemm::collective { +template < + class ArchTag, + class OpClass, + class ElementA, + class GmemLayoutA, + int AlignmentA, + class ElementB, + class GmemLayoutB, + int AlignmentB, + class ElementAccumulator, + class TileShape_MNK, + class ClusterShape_MNK, + class StageCountType, + class KernelScheduleType, + class Enable = void +> +struct CollectiveBuilder { + static_assert(sizeof(ElementA) == 0, "Could not build a collective for given parameters."); +}; +} // namespace cutlass::gemm::collective +``` + +`CollectiveBuilder` accepts CUTLASS 2.x equivalent input template arguments, and attempts to build +the best performing `CollectiveMma` from the given parameters. + +- `ArchTag` is one of the SM architectures tags from `cutlass::arch::Sm*`. +- `OpClass` is one of the operator class tags from `cutlass::arch::Sm*`. +- `ElementA` and `ElementB` are the logical value types of the A resp. B tensors. +- `ElementAccumulator` is the accumulator type to be used in the instruction. +- `GmemLayoutA` and `GmemLayoutB` are CUTLASS 2.x layout tags, `layout::RowMajor` or `layout::ColumnMajor`. +- `AlignmentA` and `AlignmentB` are global memory alignments of A and B tensors in terms of element count. +- `TileShape_MNK` is an instance of `cute::Shape` that is rank-3, representing the MxNxK collective tile shape. +- `ClusterShape_MNK` is an instance of `cute::Shape` that is rank-3, representing the MxNxK threadblock cluster tile shape. +- `StageCountType` is either `collective::StageCountAuto` or an instance of `collective::StageCount`. +- `KernelScheduleType` is either `collective::KernelScheduleAuto` or one of the specific kernel schedule tags discussed in the [dispatch policy section](#collective-dispatch-policies) above. + +`StageCountAuto` allows the collective builder to compute the size of a single stage's size in shared memory +and maximize the shared memory usage assuming 1 threadblock / multiprocessor occupancy. + +`KernelScheduleAuto` allows the collective builder to pick the best kernel schedule available for the +given set of parameters, or let's the user override this with a specific kernel schedule type. + +Note that collective builders are still in beta, and their functionality +does not map onto the full design space that the primary expert `CollectiveMma` API +allows for. We expect their supported mainloop types to expand in future releases, but +with 3.0, only SM90 tensorop kernels are supported through the builder API. The builder API +may also change in the future as we adopt user feedback. + +If the builder is able to provide a collective mainloop type for the given set of parameters, +it will be aliased within as `CollectiveOp`. For more information on how to +parameterize kernels conveniently with the collective builder, please see example [49_hopper_gemm_with_collective_builder](/examples/49_hopper_gemm_with_collective_builder). + +### Epilogue + +The collective epilogue implements element-wise operations +involving the output matrix. Users can provide a custom +epilogue, or use one of the standard epilogues. +These live in the directory +[include/cutlass/epilogue/collective/](/include/cutlass/epilogue/collective/), +and include classes like +`cutlass::epilogue::collective::DefaultEpilogue` +and +`cutlass::epilogue::collective::Epilogue`. +CUTLASS's provided collective epilogues +do not live under `include/cutlass/gemm` +or in the `cutlass::gemm` namespace, +because they can be used for computations +other than GEMM. 
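+
+Functionally, the default epilogue applies a thread-level operator such as
+`cutlass::epilogue::thread::LinearCombination` to every accumulator element. The scalar
+sketch below shows the elementwise update; it is illustrative only, since the real
+epilogue operates on vectorized fragments and handles layout and predication.
+
+```c++
+// Illustrative scalar form of a LinearCombination-style epilogue:
+// D = alpha * accumulator + beta * C, applied elementwise to the output tile.
+template <class ElementOutput, class ElementAccumulator, class ElementCompute>
+ElementOutput linear_combination(ElementAccumulator accum,
+                                 ElementOutput source_c,
+                                 ElementCompute alpha,
+                                 ElementCompute beta) {
+  ElementCompute intermediate =
+      alpha * ElementCompute(accum) + beta * ElementCompute(source_c);
+  return ElementOutput(intermediate);  // convert to the output element type
+}
+```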
+ +## Kernel API + +The kernel is "a collection of all clusters in the grid." +The kernel layer schedules have four main responsibilities. + +- Ordering the execution of collectives within the kernel, performing any synchronization between that may be necessary +- Marshalling the threads of a warp specialized schedules into their respective roles +- Performing any necessary grid swizzling logic +- Tiling the input tensors with the threadblock cluster value tile before invoking the collectives on them + +The Kernel API is the entry point for a grid of thread blocks +that may or may not be organized in a cluster. +It is the composition point for fusing back-to-back GEMMs, +epilogues, and/or other operations. + +The entry point API for CUTLASS 3.0 kernel is the class +`cutlass::gemm::kernel::GemmUniversal`, found in the header file +[include/cutlass/gemm/kernel/gemm_universal.hpp](/include/cutlass/gemm/kernel/gemm_universal.hpp). +`GemmUniversal` is a stateless universal device kernel +that implements GEMM as the composition of two parts: + +* a collective mainloop, and +* a collective epilogue + +```cpp +namespace cutlass::gemm::kernel { +/* + * Stateless universal device GEMM kernel type that treats GEMM as + * a composition of a collective mainloop and a collective epilogue. + * + * Supports both the 2.x and 3.x APIs based on whether the first type is + * a cute::tuple<> or not. + * 2.x API implementation: cutlass/gemm/kernel/gemm_universal.h + * 3.x API implementation: cutlass/gemm/kernel/gemm_*.hpp + * + * In the following declaration, the name preceding the 'Or' refers to + * 3.x API type argument order, and the name succeeding the 'Or' refers to + * 2.x API type argument order. Template arguments without two names + * belong to the 3.x API only. +**/ +template < + class ProblemShapeOrThreadblockMma_, // (m, n, k) or (m, n, k, l) + class CollectiveMainloopOrEpilogue_, + class CollectiveEpilogueOrThreadblockSwizzle_, + class TileScheduler_ = void, + class Enable = void +> +class GemmUniversal; +} // namespace cutlass::gemm::kernel +``` + +*Stateless* means that the caller -- +for example, the Device API described above -- +manages the kernel's state. +The kernel just takes input and output parameters (`Params`). + +*Universal* means that `GemmUniversal` works +for both CUTLASS 3.0 and 2.x interfaces +and across a broad range of kernel schedules. +If `GemmUniversal`'s first template argument is a `cute::Shape`, +then `GemmUniversal` assumes that the remaining template arguments +implement the 3.0 APIs. Otherwise, `GemmUniversal` assumes that +the remaining template arguments implement the 2.x APIs. +Starting with CUTLASS 3.0, the problem shape has been promoted +to a top-level template API for the GEMM kernel. +This supports fully static GEMM instantiations +where the user expects to know some or all +of the problem shapes at compile time +in order to extract even more performance. + +The *collective mainloop* implements MMA on local tiles. +The *collective epilogue* addresses any operations after the MMA, +such as applying the `beta * C` part of `C := beta * C + alpha * A * B`. +We will explain *collective* in more detail below. + +Specializations of `kernel::GemmUniversal` for 3.0 APIs live in +any of various `gemm_*.hpp` files in the directory +[include/cutlass/gemm/kernel/](/include/cutlass/gemm/kernel/). +Specializations for 2.x APIs can be found in the header file +[include/cutlass/gemm/kernel/gemm_universal.h](/include/cutlass/gemm/kernel/gemm_universal.h). 
+ +CUTLASS 3.x implements various embodiments of `kernel::GemmUniversal`. +Each kernel layer schedule is specialized +for a GEMM scheduling algorithm and GPU architecture. +Specializations of `kernel::GemmUniversal` for 3.0 APIs live in +any of various `include/cutlass/gemm/kernel/{arch_tag}*.hpp` files in the directory +[include/cutlass/gemm/kernel/](/include/cutlass/gemm/kernel/). +Which specialization to dispatch to is decided through the dispatch policy's `Schedule` type. + +For example, the header file +[include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized_pingpong.hpp](/include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized_pingpong.hpp) +has a specialization of `kernel::GemmUniversal` for Hopper +that uses a warp-specialized mainloop with a persistent scheduling algorithm, +while the header file +[include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized.hpp](/include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized.hpp) +has a specialization of `GemmUniversal` for Hopper +that uses a warp-specialized but non-persistent algorithm. + +To support composition between supported kernel schedules and mainloop dispatch policies without having to +duplicate collective mainloop implementations, GEMM kernel layer schedules can be composed with +any mainloop that specifies their corresponding kernel schedule as their `Schedule` type in the policy. +This is discussed in detail in the [collective dispatch policy section](#collective-dispatch-policies) above. + +```c++ +// An example of the SM90 KernelMultistage kernel's +// specialization logic that allows it to be composed +// with many mainloops such as `MainloopSm80CpAsync` +// and `MainloopSm70TwoStage`. +template < + class ProblemShape_, + class CollectiveMainloop_, + class CollectiveEpilogue_, + class TileScheduler_ +> +class GemmUniversal< + ProblemShape_, + CollectiveMainloop_, + CollectiveEpilogue_, + TileScheduler_, + std::enable_if_t>> +``` + +## Device API + +The Device API is a universal, kernel-agnostic host interface +for kernel launch and managing the lifetime of +reusable host-side parameters. + +This API is how users' host-side .cu code +invokes CUTLASS's single-GPU GEMM kernels. +It serves the same purpose as cuBLAS and behaves similarly. + +The entry point for the Device GEMM API is the class +`cutlass::gemm::device::GemmUniversalAdapter`. +This class lives in the header file +[include/cutlass/gemm/device/gemm_universal_adapter.h](/include/cutlass/gemm/device/gemm_universal_adapter.h). +`GemmUniversalAdapter` is a stateful, reusable handle, +which is parameterized on the `cutlass::gemm::kernel` type. + +```c++ +/*! + GemmUniversalAdapter is a stateful, reusable GEMM handle built around a kernel + of type cutlass::gemm::kernel::* + + It manages the lifetime of the underlying `kernel::Params` struct, and exposes APIs + to create it from the host facing arguments. For power users, new static methods + are exposed in 3.x APIs that bypass the stateful methods or args->params lowering. + + It supports kernel types that implement both the 2.x and 3.0 APIs, + however, this is done by specializing the implementation of GemmUniversalAdapter + on the two kernel API types, and thus, GemmUniversalAdapter's behavior might + differ between the two specializations. +*/ +template +class GemmUniversalAdapter; +``` + +*Stateful* means that the handle instance contains state +that the kernel needs to run. +This means that the user must initialize the handle first, +then use the initialized handle instance to run the kernel. 
+Statefulness also means that the handle can manage the lifetime +of the kernel's `Params` -- the parameters of the kernel itself. +An important duty of `GemmUniversalAdapter` +is to map from the user's `Arguments` -- +what the user sees as the kernel's parameters -- +to the `Params` that the kernel actually sees. +For power users, the class exposes new static methods +in 3.0 APIs that can bypass stateful methods +or go directly to `Params` without intermediate `Arguments`. + +*Reusable* means that the handle instance can be used +to call the kernel multiple times with different arguments +(e.g., different matrices). +Reusing the handle may be more efficient than just +creating a new handle for each kernel invocation. + +*Parameterized on the kernel type* means that +the `GemmUniversalAdapter` class' behavior +depends on the GEMM kernel type (see the next section). +Specifically, `GemmUniversalAdapter` has a template parameter +`GemmKernel`, which is the GEMM kernel type. +Valid template arguments for `GemmKernel` are + +* `cutlass::gemm::kernel::GemmUniversal`, + implementing CUTLASS 3.x API kernels; +* `cutlass::gemm::kernel::GemmUniversal`, + implementing CUTLASS 2.x API kernels; or +* Any valid CUTLASS 2.x `kernel` layer GEMM that + was previously composable with the `device::GemmUniversalAdapter`. + +`GemmUniversalAdapter` presents a single +host-side interface to both 3.0 and 2.x kernels. +CUTLASS accomplishes this by +specializing `GemmUniversalAdapter`'s implementation +on either the 2.x API implementing kernel layer GEMMs, or on the 3.x API +implementing kernel layer GEMMs. The metafunction [`cutlass::gemm::detail::IsCutlass3GemmKernel`](cutlass_3x_backwards_compatibility.md#kernel-api-design-differences) +is what `GemmUniversalAdapter` uses to distinguish between 2.x and 3.x kernels. + +`GemmUniversalAdapter` sets up and launches the kernel, using the +CUDA extended launch API for threadblock cluster support if required. +Note, `GemmUniversalAdapter` does *not* specify the grid shape. +The kernel controls the grid shape +and other kernel-specific launch parameters. +This makes it possible for all 3.0 kernels +to use the same kernel launch code, +thus factoring out kernel launch from the actual kernel. + +## Tiled MMA and Copy + +The Tiled MMA or Copy are tilings of MMA atoms resp. Copy atoms +across threads and data, with possible permutations applied to the +resulting tiling. This layer is most analogous to the warp level +tiling of MMA instructions in CUTLASS 2.x. However, it views the tiling +from the perspective of all threads participating in the operation +and generalizes the concept to copy operations as well. The purpose +of this layer is to build composable GPU micro-kernels out of a plethora +of hardware accelerated math and data movement operations, each with their +unit layouts in threads and data. The tiled MMA and Copy types present +all these various hardware accelerated CuTe Atoms with a single, consistent +API. + +The resulting tiled operation acts as a single MMA or copy operation +that users can invoke in the "inner" loop +of the three-nested-loops pseudocode +at the top of this document using `cute::gemm()` or `cute::copy()`. + +We call this API "tiled" because it constructs +larger operations out of the Atoms provided by CuTe, +as if fitting together individual tiles +to build a reusable component of a mosaic. +For example, CuTe might provide an MMA Atom +that users can call on a single warp, +for fixed M, N, and K dimensions. 
+CUTLASS can then use CuTe operations like `make_tiled_mma` +to turn this Atom into an operation +that works on an entire thread block, +for larger M, N, and K dimensions. + +## Atom API + +An "Atom" is the smallest collection of threads and data +that must participate in the execution of a hardware-accelerated +math or copy operation. + +An Atom is "atomic" (indivisible) not in the sense of +concurrent memory operations like `atomicAdd` +(which are "indivisible in time (causality)"), +but in the sense of indivisibility in "space" -- +the number of values and the groups of parallel workers +that must participate in the operation together. + +An Atom uses CuTe Layouts to express the required +dimensions and strides of its input and output arrays. +Generally these are fixed at compile time. + +The Atom API wraps calls to actual hardware instructions +that accelerate MMA or copy operations. +Users can ask for GPU architecture-specific implementations, +or just pick generic implementations and rely on +whatever GPU architectures were enabled. + +For more information about Atoms, +please refer to CuTe's tutorial, e.g., the sections on + +* [algorithms](./cute/04_algorithms.md) like `gemm` and `copy`, + +* [MMA Atoms](./cute/0t_mma_atom.md#cute-mma-atoms), and + +* [a GEMM example](./cute/0x_gemm_tutorial.md). + +# Copyright + +Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +SPDX-License-Identifier: BSD-3-Clause + +``` + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+``` diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/grouped_scheduler.md b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/grouped_scheduler.md new file mode 100644 index 0000000000000000000000000000000000000000..facbd286147d374254a71fee333cd29d0c54b049 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/grouped_scheduler.md @@ -0,0 +1,388 @@ +![ALT](/media/images/gemm-hierarchy-with-epilogue-no-labels.png "CUTLASS Grouped Kernel Schedulers") + +[README](/README.md#documentation) > **Grouped Kernel Schedulers** + +# CUTLASS Grouped Kernel Schedulers + +CUTLASS's grouped kernel is a persistent kernel which launches multiple problems (e.g., GEMMs, SYR2Ks) within a +single CUDA kernel launch. + +Unlike a conventional GEMMs in CUTLASS, which launch a number of threadblocks equal to the number +of tiles in the GEMM, CUTLASS grouped kernels typically launch a number of threadblocks that is +fewer than the total number of tiles across all problems in the group. Each threadblock is then +responsible for computing one or more tiles among the problems in the group. The grouped kernel +_scheduler_ (referred to as the _problem visitor_ in code) is responsible for assigning each +threadblock the sequence of tiles that it will compute within the group. + +This document provides background on the functionality of the grouped kernel scheduler, and describes +various optimizations to the grouped kernel scheduler. + +**Outline** + +* [Introduction to Grouped Kernel Schedulers](grouped_scheduler.md#introduction-to-grouped-kernel-schedulers) +* [Grouped GEMM Scheduler](grouped_scheduler.md#grouped-gemm-scheduler) +* [Grouped Rank2K Scheduler](grouped_scheduler.md#grouped-rank2k-scheduler) +* [Scheduler Modes](grouped_scheduler.md#scheduler-modes) +* [Improving Load Balance by Sorting Problems](grouped_scheduler.md#improving-load-balance-by-sorting-problems) + +# Introduction to Grouped Kernel Schedulers +Given a group of problem sizes and a grid of threadblocks, the scheduler's job is to assign +tiles from problems in the group to threadblocks. Threadblocks in a grouped kernel persistently +execute a loop of querying the scheduler for the next tile to compute and performing the +kernel-level operations for that tile (e.g., MMA and epilogue). In pseudocode, this looks as +follows: +```c++ +ProblemVisitor problem_visitor; + +while (problem_visitor.next_tile()) { + // + // Get next tile index from scheduler + // + + // + // Compute MMA and epilogue + // + + // Inform the scheduler that we are done with the current tile + problem_visitor.advance(gridDim.x); +} +``` + +The key functionality of the grouped kernel scheduler lies in the `next_tile()` method, +which determines which tile in the group the calling threadblock should compute next, if any. + +# Grouped GEMM Scheduler +The scheduler used by grouped GEMM assigns tiles in the group to threadblocks in a round-robin +fashion. + +Consider, for example, the threadblock-to-tile mapping that occurs for a group of four GEMMs +each consisting of a grid of 2x2 tiles. Suppose that eight threadblocks are launched. The +figure below illustrates the threadblock ID assigned to each tile in each GEMM in the group. 
+
+![ALT](/media/images/grouped-gemm-schedule-2x2.png "CUTLASS grouped GEMM scheduler assigning threadblocks to four GEMMs with 2x2 grids of tiles")
+
+A similar mapping for problems that do not have the same number of tiles
+is shown below:
+
+![ALT](/media/images/grouped-gemm-schedule-varied.png "CUTLASS grouped GEMM scheduler assigning threadblocks to four GEMMs with varying tile count")
+
+## Computing the schedule for a given block
+Each threadblock in the grouped GEMM computes its own schedule by calling
+the `next_tile()` method described above.
+
+To do this, the threadblock's `ProblemVisitor` maintains a `tile_idx`
+member that is initialized to `blockIdx.x` and is incremented by
+`gridDim.x` between each tile computed (only the x dimension of the
+launch configuration is used for grouped kernels). The scheduler must
+then figure out which GEMM in the group `tile_idx` belongs to, and which tile
+within that problem it maps to.
+
+1. **Determining which GEMM `tile_idx` maps to:** The scheduler determines
+the GEMM to which `tile_idx` belongs by iterating through GEMMs starting with
+the most-recently visited GEMM, and adding the number of tiles within that
+GEMM to a running variable `problem_tile_start`. The scheduler has found the
+correct problem for this tile when `problem_tile_start <= tile_idx < problem_tile_start + tiles_in_problem`.
+
+2. **Determining which tile within that GEMM `tile_idx` maps to:** Once the GEMM
+to which `tile_idx` maps has been located, the specific tile within that
+GEMM that this block should compute is given by `tile_idx - problem_tile_start`.
+Simple rasterization is then performed to map this one-dimensional tile ID
+into the two-dimensional coordinate of the tile to compute in the GEMM.
+
+We describe how this search is accelerated in [Scheduler Modes](grouped_scheduler.md#scheduler-modes).
+
+# Grouped Rank2K Scheduler
+The previous section described the operation of the scheduler used
+for grouped GEMM kernels. While this scheduler is sufficient for
+correctly implementing grouped Rank2K operations (i.e., SYR2K and HER2K),
+it leads to significant inefficiencies.
+
+We next describe these inefficiencies as well as how the CUTLASS
+grouped Rank2K scheduler overcomes them.
+
+## Inefficiency of the grouped GEMM scheduler for grouped Rank2K problems
+The grouped GEMM scheduler assumes that every tile in every GEMM in the group will
+ultimately affect the output of the problem. This is not the case for Rank2K
+problems, for which matrix C is either upper or lower triangular. Using the default
+grouped GEMM scheduler for such problems will thus lead to threadblocks frequently
+being assigned to tiles that exit early (e.g., due to being assigned to a tile in the
+upper-triangular portion of a lower-triangular problem). This further leads to load
+imbalance among threadblocks, as the grouped GEMM scheduler assigns nearly the same
+number of tiles to all threadblocks, regardless of how many tiles are truly active.
+
+Consider an example of a group of four SYR2K problems, each with matrix C consisting
+of a grid of 2x2 tiles. Matrix C in each problem is lower triangular, indicated by
+shaded tiles. Consider that eight threadblocks are launched to compute the grouped
+problem.
The default grouped GEMM scheduler will assign threadblocks to tiles in the following order: + +![ALT](/media/images/grouped-syr2k-schedule-using-grouped-gemm-scheduler.png "CUTLASS grouped GEMM scheduler assigning threadblocks to four SYR2Ks with 2x2 grids of tiles") + +In this case, threadblocks 1 and 5 are continuously assigned to inactive tiles. In +scenarios in which problems within the group have varying size, we have observed +this to still lead to significant load imbalance. + +## Specializing the scheduler for triangular problems +We seek to design a scheduler that more efficiently maps threadblocks to active tiles +for kernels that use triangular output matrices. The scheduler should ideally assign +threadblocks only to those tiles within lower-triangular portion of a +lower-triangular problem (and vice-versa for upper-triangular problems). + +Using the example above, the resulting assignment of threadblocks to tiles from +such a scheduler might be: + +![ALT](/media/images/grouped-syr2k-schedule-ideal.png "CUTLASS grouped SYR2K scheduler assigning threadblocks to four SYR2Ks with 2x2 grids of tiles") + +Achieving this schedule requires mapping from a threadblock ID to tile coordinates + `(i, j)`. + +We will illustrate this by mapping a lower-triangular matrix with a 3x3 grid. We +first calculate row and column indices assuming one-indexed rows, tiles, and +threadblock IDs, and then subtract one to convert to zero-indexed versions. Our +description borrows heavily from the mapping described [here](https://stackoverflow.com/a/40954159). + +![ALT](/media/images/grouped-syr2k-schedule-3x3.png "CUTLASS grouped SYR2K scheduler assigning threadblocks to one SYR2K with a 3x3 grids of tiles") + +### Calculating row `i` given threadblock ID `t` +For a given row i, all threadblock IDs t in that row satisfy the following: +``` +t <= 1 + 2 + 3 + ... + (i-1) + i +``` + +The closed-form equation for the right-hand side is: `i(i+1)/2`. +Using this, we can solve for `i` given `t`: +``` +t <= i(i+1)/2 +2t <= i^2 + i +2t <= i^2 + i + 0.25 - 0.25 +2t + 0.25 <= i^2 + i + 0.25 +2t + 0.25 <= (i + 0.5)^2 +sqrt(2t + 0.25) - 0.5 <= i +``` + +To account for fractional values, we set: +``` +i = ceil(sqrt(2t + 0.25) - 0.5) +``` + +To turn this into a zero-indexed row and work with zero-indexed `t`, we perform: +``` +i = ceil(sqrt(2(t+1) + 0.25) - 0.5) - 1 + = ceil(sqrt(2t + 2.25) - 0.5) - 1 +``` + +### Calculating column `j` given threadblock ID `t` and row `i` +For a given row `i`, all threadblock IDs `t` in that row also satisfy the following: +``` + t > 1 + 2 + 3 + ... + (i-2) + (i-1) +--> t > i(i-1)/2 +``` + +Threadblock IDs within a given row are sequential, so the one-indexed column ID +for one-indexed threadblock ID `t` and row `i` is: +``` +j = t - (i(i-1)/2) +``` + +The zero-indexed version becomes: +``` +j = (t+1) - (i(i+1)/2) -1 + = t - (i(i+1)/2) +``` + +### Accounting for non-square grids +Though the overall output problem size for Rank2K problems is guaranteed to be square, the +grids used in computing may not be square due to using non-square threadblock shapes. For +example, a threadblock shape of 64x32 operating on a problem of output size 128x128 would +result in a grid of 2x4 tiles. + +This case can be handled by noting that the output resembles a square grid of 2x2 "macro tiles" +each of which contains 2 "true tiles." We can thus first map a threadblock ID to its "macro tile" +using the equations above, and then map it to the "true tile" within its "macro tile." 
In the example +of a 2x4 grid, this mapping would look as follows: + +![ALT](/media/images/grouped-syr2k-schedule-macro.png "CUTLASS grouped SYR2K scheduler converting a grid into a 'macro grid' for computing tile mappings for non-square grids") + +A zero-indexed threadblock ID `t` is mapped to its "macro tile ID" `t_macro` as: +``` +t_macro = t // r +``` +Where `r` is the ratio of the maximum dimension of the grid to the +minimum dimension of the grid (i.e., `r = 4 / 2 = 2` in the previous example). + +One uses `t_macro` and the calculations above to find the row and column in the square matrix to +obtain `i_macro` and `j_macro` (zero-indexed). The mapping from `(i_macro, j_macro) --> (i, j)` +is simply the following: +``` +if (ThreadblockShape::M > ThreadblockShape::N): + r = ThreadblockShape::M / ThreadblockShape::N + i = i_macro + j = (j_macro * r) + (t % r) +elif (ThreadblockShape::M < ThreadblockShape::N): + r = ThreadblockShape::N / ThreadblockShape::M + i = (i_macro * r) + (t % r) + j = j_macro +else: + i = i_macro + j = j_macro +``` + +### Handling cases with grid dimensions that aren't multiples of each other +Even though threadblock shapes M and N are typically multiples of one another, the grid +for a given problem may not have dimensions of the same ratio as that of the threadblock. +For example, a problem of size 132x132 using a threadblock of shape 64x32 will result +in a grid of 3x5 tiles. In this case, there is not an integer number of "true tiles" +per "macro tile." + +When this scenario arises, we simply pad the larger dimension of the grid such that +there are an integer number of "true tiles" per "macro tile." Thus, the 3x5 grid in +the example above will be treated as a 3x6 grid. Row and column positions for each +tile are calculated as above. Any threadblocks that map to tiles that are outside the +problem range or upper/lower triangular portion (e.g., (2, 5)) will exit early from +this problem and may proceed to the next problem in the group. + +### Handling upper-triangular matrices +The only modification needed for upper-triangular matrices is to swap `i_macro` and `j_macro` in the calculations above. + +# Scheduler modes +The grouped kernel schedulers come with two different modes for finding +the next tile for a block to compute. These techniques are controlled by +the [`cutlass::gemm::kernel::GroupScheduleMode`](../../include/cutlass/gemm/kernel/grouped_problem_visitor.h) enum. +We describe each mode in greater detail below. + +## `GroupScheduleMode::kDeviceOnly` (default) +This scheduler mode performs all scheduling work on the device. It parallelizes +the search for the problem that `tile_idx` maps to by having each thread "own" +a different problem and determine whether `tile_idx` falls within the range of +that problem. + +`GroupScheduleMode::kDeviceOnly` performs this parallelization in a warp-wide +fashion. Each thread in the warp loads a problem size indexed by its lane id and +computes the number of tiles in that problem. A warp-wide prefix sum is used to find +the starting tiles for the set of problems the warp is looking at. At the end of the +prefix sum, each thread holds the starting tile index and tile count for a unique +problem in the group. + +While `tile_idx` remains within the range of the problems currently hosted by the +warp, each thread will check whether `tile_idx` is in the range of its current +problem. The matching problem index and its starting tile are then broadcasted to all +threads in the warp. 
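+
+The warp-wide search can be pictured with the sketch below. It is illustrative only:
+bounds checks are omitted, it assumes `tile_idx` falls within the problems currently
+hosted by the warp, and the names do not correspond to the actual `ProblemVisitor` code.
+
+```c++
+// Illustrative warp-wide search used by GroupScheduleMode::kDeviceOnly: each lane
+// owns one problem, a prefix sum yields each problem's starting tile, and the lane
+// whose range contains tile_idx broadcasts its result to the whole warp.
+__device__ void find_problem(int lane_idx,
+                             int group_problem_start,      // first problem hosted by this warp
+                             int group_tile_start,         // starting tile of that problem
+                             int const *tiles_per_problem, // tiles in each problem of the group
+                             int tile_idx,
+                             int &problem_idx,
+                             int &problem_tile_start) {
+  // Each lane loads the tile count of "its" problem.
+  int my_tiles = tiles_per_problem[group_problem_start + lane_idx];
+
+  // Inclusive warp-wide prefix sum of tile counts.
+  int prefix = my_tiles;
+  for (int offset = 1; offset < 32; offset *= 2) {
+    int neighbor = __shfl_up_sync(0xffffffff, prefix, offset);
+    if (lane_idx >= offset) { prefix += neighbor; }
+  }
+
+  // Starting tile of this lane's problem (exclusive prefix), relative to the group.
+  int my_start = group_tile_start + prefix - my_tiles;
+
+  // Exactly one lane's range contains tile_idx; broadcast its result to all lanes.
+  bool match = (tile_idx >= my_start) && (tile_idx < my_start + my_tiles);
+  int src_lane = __ffs(__ballot_sync(0xffffffff, match)) - 1;
+
+  problem_idx        = __shfl_sync(0xffffffff, group_problem_start + lane_idx, src_lane);
+  problem_tile_start = __shfl_sync(0xffffffff, my_start, src_lane);
+}
+```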
+ +## Precomputing schedules on the host: `GroupScheduleMode::kHostPrecompute` +This scheduler attempts to reduce the amount of scheduling performed on the device +by precomputing on the host the sequence of problems that will +be accessed by each block. As described above, all that is needed to map tile_idx to +the specific tile within a problem to compute is the problem ID and the problem's +starting tile (among all of the tiles in the group). Thus, this scheduler precomputes +the problem index and problem starting tile for each tile computed by each block. + +The schedule for an individual block is represented as an array of +`(problem_idx, problem_starting_tile)` tuples. There is one such array per block. +These arrays are produced on the host and copied over to the device. This +representation is optimized for the case in which blocks compute at most one +tile per problem. When a block computes multiple tiles per problem in the group, +the representation above will result in duplicate entries, and thus will be +suboptimal (e.g., `[(3, 20), (3, 20)]` for a block that computes two tiles in +problem 3, which has starting tile index 20). +We have chosen to use the representation described above because grouped kernels +themselves are typically most beneficial when problem sizes are small, and, thus, +blocks compute at most one tile per problem. + +## Which scheduler mode should I use? +Consider the following questions when deciding which scheduling mode to use: + +### How are the parameters used as input to the grouped kernel (e.g., ptrA, lda) set in my application? +If these are set by a previous kernel running on +the device (rather than by the host), you likely want to use `kDeviceOnly`, +as this will minimize additional host-device communication. + +### Can host-side work be overlapped with other device kernels in my application? +For example, if a grouped GEMM is used as the Nth layer in a neural network, +host-side precomputation for the grouped GEMM can potentially be overlapped +with device-side work for layer N-1. In this case `kHostPrecompute` is likely +a good fit. + +### How compute-intensive are the problems in my group? +The differences in performance between `kHostPrecompute` and `kDeviceOnly` are most +noticeable for grouped kernels with low computational intensity, for which time spent in +the scheduler accounts for a significant fraction of the grouped kernel's runtime. +Intuitively, as problems in a group decrease in computational intensity, a smaller +fraction of the overall runtime will be consumed in performing MMA operations, leading +to a larger fraction of the overall runtime being consumed by scheduling logic. + +Since the scheduling modes affect only the scheduling logic of the grouped kernels, +one expects to see most benefit from `kHostPrecompute` for less computationally-intense +groups. + +# Improving Load Balance by Sorting Problems +The grouped kernel schedulers assign a nearly equal number +of tiles to each block participating in the grouped kernel. Every tile in the +group has the same M and N dimensions. However, the K dimension of each +tile depends on the K dimension of the problem, so tiles may have different +K dimensions. Thus, the K dimension of the +tile plays a significant role in determining how long it takes for a given +tile to be computed. 
+
+## Potential problems with an imbalanced K dimension
+To ensure that compute load is balanced evenly across blocks, it is important
+that the sum of the K dimensions among all tiles a block computes be similar
+to that of other blocks; if one block computes far more tiles with a large
+value of K than other blocks, it may take longer than the other blocks.
+
+For example, consider the following group of GEMMs:
+```
+0 1152x768x128
+1 1152x768x1024
+2 768x1152x128
+3 768x1152x1024
+```
+If a tile size of 128x128 is used, then each problem will have 54 tiles.
+Thus, there are 216 tiles across the group.
+
+Suppose this grouped GEMM is run on GA100, which has 108 SMs. Suppose that
+the occupancy given the parameters of the grouped GEMM is one -- one threadblock
+can be active at a time on an SM. The grouped GEMM will, thus, run with 108
+persistent threadblocks, each of which computes (216 / 108) = 2 tiles.
+
+Under the round-robin assignment of tiles to threadblocks employed by
+the grouped GEMM scheduler, the assignment of tiles to threadblocks
+in this group will be as follows:
+```
+Threadblocks 0-53:   Tiles of size 128x128x128  from problem 0
+Threadblocks 54-107: Tiles of size 128x128x1024 from problem 1
+Threadblocks 0-53:   Tiles of size 128x128x128  from problem 2
+Threadblocks 54-107: Tiles of size 128x128x1024 from problem 3
+```
+
+Following this assignment, threadblocks 54-107 perform significantly more
+work than threadblocks 0-53 because they compute two tiles with a K
+dimension of 1024, whereas threadblocks 0-53 compute two tiles with a K
+dimension of only 128.
+
+Due to this imbalanced assignment, threadblocks 54-107 will run
+significantly longer than threadblocks 0-53, leaving threadblocks
+0-53 idle for a large fraction of time.
+
+Clearly, a better assignment of tiles to threadblocks for this
+example would involve all threadblocks computing one tile with
+a K dimension of 1024 and one tile with a K dimension of 128.
+This would better balance the workload among threadblocks.
+
+## Potential for sorting problems to reduce imbalance
+A simple way to potentially reduce load imbalance is to sort the problems in a group in
+descending order of their K dimension. This can help to improve load balance
+because tiles in a group are handed out to blocks in a round-robin fashion,
+so each block is always assigned, as its next tile, the remaining tile with
+the largest K dimension.
+
+For the example described above, sorting the problem sizes before
+executing grouped GEMM improves the runtime of this grouped GEMM on GA100 with each
+scheduling mode by around 30%.
+
+To ease the process of sorting groups and their associated metadata in this
+manner, the device-level grouped kernels provide a `sort_problems()` method.
+An example of how to use this may be found in the [grouped GEMM example](../../examples/24_gemm_grouped/gemm_grouped.cu).
+
+Finally, while sorting problems can be helpful in certain scenarios, it is
+not guaranteed to improve performance. In some cases, performance can
+decrease when sorting problems due to additional conflicting factors that
+affect GEMM performance. We recommend profiling your grouped kernel with
+and without sorting to see whether it helps in your case.
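+
+As a minimal host-side illustration of the idea, one can sort the list of problem sizes
+by descending K before constructing the grouped arguments. The struct below is a
+hypothetical stand-in; the real grouped GEMM arguments carry pointers, leading dimensions,
+and other metadata that must be permuted consistently, which is what `sort_problems()`
+handles for you.
+
+```c++
+#include <algorithm>
+#include <vector>
+
+// Hypothetical problem-size record used only for this illustration.
+struct ProblemSize { int m, n, k; };
+
+// Sort a group by descending K so that round-robin assignment hands out the
+// largest-K tiles first.
+void sort_by_descending_k(std::vector<ProblemSize> &problems) {
+  std::sort(problems.begin(), problems.end(),
+            [](ProblemSize const &a, ProblemSize const &b) { return a.k > b.k; });
+}
+```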
diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/implicit_gemm_convolution.md b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/implicit_gemm_convolution.md new file mode 100644 index 0000000000000000000000000000000000000000..4418b95a6182fae91b912c7ff7a7f97de0316e37 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/implicit_gemm_convolution.md @@ -0,0 +1,792 @@ +![ALT](/media/images/gemm-hierarchy-with-epilogue-no-labels.png "CUTLASS Implicit GEMM API") + +[README](/README.md#documentation) > **Implicit GEMM Convolution** + +# CUTLASS Convolution + +Implicit GEMM is the formulation of a convolution operation as a GEMM (generalized matrix-matrix +product). Convolution takes an activation tensor and applies a sliding filter on it to produce an +output tensor. + +## Introduction + +This release of CUTLASS contains several artifacts related to convolution. + +- [**Implicit GEMM Algorithm**](implicit_gemm_convolution.md#implicit-gemm-algorithm) +- [**CUTLASS Convolution Implementation**](implicit_gemm_convolution.md#cutlass-convolution-implementation) +- [**Convolution Examples**](implicit_gemm_convolution.md#convolution-example) + + +# Implicit GEMM Algorithm + +2-D convolution may be mapped to matrix multiply +by first forming a _convolution matrix_ containing elements of the activations tensor, +then multiplying this by a matrix formed from the filters tensor. +The earliest form of this algorithm constructs the convolution matrix explicitly via an operation +conventionally referred to as `im2col`. The resulting matrix replicates each activation element by a factor +equal to the filter size, consuming additional storage capacity and memory bandwidth. + +The _implicit GEMM_ algorithm is a variation on the blocked, hierarchical GEMM computation in CUDA. +Instead of constructing the convolution matrix explicitly, +it forms tiles of the convolution matrix on the fly +as data are loaded from global memory into Shared Memory +by carefully updating pointers and predicates. +Once the convolution matrix is formed in Shared Memory, +the existing warp-level GEMM components accumulate the result of +convolution and update the output tensor. + +This section describes the structure of an efficient Implicit GEMM Convolution CUDA kernel +for Turing Tensor Cores. + +## Mapping Convolution to GEMM + +The forward convolutional layer computes an output tensor _y = conv(x, w)_ where x(NHWC), w(KRSC), and y(NPQK) +are 4-D tensors. + +This computation may be described by the following analytic function. + +``` +y[n, p, q, k] = sum_c(sum_r(sum_s( x[n, f(p, r), g(q, s), c] * w[k, r, s, c] ))) +``` +where functions _f_ and _g_ are defined as follows. + +``` +f(p, r) = p * stride_h + R - r - 1 + pad_h +g(q, s) = q * stride_w + S - s - 1 + pad_w +``` + +A [host](/tools/util/include/cutlass/util/reference/host/convolution.h) and [device](/tools/util/include/cutlass/util/reference/device/convolution.h) +reference implementation are provided in the CUTLASS Utilities. + +This computation may be mapped to the elements of a matrix product as follows. 
+ +``` +C = gemm(A, B) +``` +where +- A is a row-major matrix of extent _NHW_-by-_RSC_ containing activations +- B is a column-major matrix of extent _RSC_-by-_K_ containing filters +- C is a row-major matrix of extent _NPQ_-by-_K_ containing the output + +Each element of the output matrix _Cij_ corresponds to an element in the output tensor y[n, p, q, k] according to +the following relation. +``` +y[n, p, q, k] = Cij +``` +where +``` +i = q + Q * (p + P * n) +j = k +``` + +These relations may be inverted as follows. +``` +k = j + +n = i / (PQ) +residual = i % (PQ) + +p = residual / Q +q = residual % Q +``` + +The triple loop nest iterating over CRS to accumulate the result may also be linearized and mapped to the inner +GEMM _K_ dimension (not to be confused with the filter tensor dimension _K_) by the following relations. + +``` +gemm_k = s + S * (r + R * c) +``` +and inverse +``` +c = gemm_k / (RS) +residual = gemm_k % (RS) + +r = residual / S +s = residual % S +``` + +Given these equations, a GEMM triple loop nest could be augmented with tensor indexing as follows. +```c++ +int GEMM_M = N * P * Q; +int GEMM_N = K; +int GEMM_K = C * R * S; + +for (int gemm_i = 0; gemm_i < GEMM_M; ++gemm_i) { + for (int gemm_j = 0; gemm_j < GEMM_N; ++gemm_j) { + + int n = gemm_i / (PQ); + int npq_residual = gemm_i % (PQ); + + int p = npq_residual / Q; + int q = npq_residual % Q; + + Accumulator accum = 0; + + for (int gemm_k = 0; gemm_k < GEMM_K; ++gemm_k) { + + int k = gemm_j; + + int c = gemm_k / (RS); + int crs_residual = gemm_k % (RS); + + int r = crs_residual / S; + int s = crs_residual % S; + + int h = f(p, r); + int w = g(q, s); + + ElementA a = tensor_A.at({n, h, w, c}); + ElementB b = tensor_B.at({k, r, s, c}); + + accum += a * b; + } + + C[gemm_i * K + gemm_j] = accum; + } +} +``` +The [CUTLASS GEMM implementation](/media/docs/efficient_gemm.md) explicitly iterates over tiles. Consequently, +a tile iterator could be implemented to compute these functions analytically and load the appropriate +elements. However, the resulting modulo arithmetic would be computationally intensive, and overhead would +limit performance of a GEMM kernel targeting Turing Tensor Cores. + +The following section describes how an efficient implementation may be implemented within the structure of +a hierarchical GEMM kernel targeting Tensor Cores. + + +# CUTLASS Convolution Implementation + +To get the best performance, the following parameters are recommended. + +- All tensors are 128-bit aligned NHWC tensors +- Channel count (C) is a multiple of 32 elements +- Filter count (K) is a multiple of 32 elements + +This enables 128-bit vector memory acceses which lead to efficient CUDA kernels. Smaller alignment is supported even on tensor cores by setting AlignmentA and AlignmentB in `conv::kernel::DefaultConv2dFprop`, but the performance is lower than 128-bit aligned tensors. + +# CUTLASS Device-level Convolution Operator + +CUTLASS defines CUDA C++ templates accepting numerous template arguments to specialize the resulting +kernel by operation, data type, tile configuration, math instruction, and fused output operation. + +In [turing_tensorop_conv2dfprop.cu](/examples/09_turing_tensorop_conv2dfprop/turing_tensorop_conv2dfprop.cu), a convolution +operation is defined as follows. 
+
+```c++
+/// Define an Implicit GEMM convolution forward propagation (fprop) kernel
+using Conv2dFpropKernel = typename cutlass::conv::kernel::DefaultConv2dFprop<
+  ElementInputA,                                   // data type of element a (mapped to activation for fprop)
+  LayoutInputA,                                    // layout of element a (mapped to activation for fprop)
+  ElementInputB,                                   // data type of element b (mapped to filters for fprop)
+  LayoutInputB,                                    // layout of element b (mapped to filters for fprop)
+  ElementC,                                        // data type of element c (mapped to output for fprop)
+  LayoutC,                                         // layout of element c (mapped to output for fprop)
+  ElementAccumulator,                              // data type of internal accumulation
+  MMAOp,                                           // opcode class tag
+  SmArch,                                          // target SM architecture
+  ThreadblockShape,                                // shape of threadblock tile
+  WarpShape,                                       // shape of warp-level GEMM tile
+  InstructionShape,                                // shape of target math instruction
+  EpilogueOp,                                      // epilogue operator
+  SwizzleThreadBlock,                              // optional function to reorder threadblocks for locality
+  NumStages,                                       // number of pipeline stages in threadblock-scoped GEMM
+  cutlass::arch::OpMultiplyAddSaturate,            // math operation on data of elements a and b
+  cutlass::conv::IteratorAlgorithm::kOptimized     // global memory iterator algorithm
+>::Kernel;
+```
+
+This template is intended to be generic and cover all feasible configurations. The example specifies
+the following concrete data types, layouts, and tile shapes.
+
+```c++
+/// Define an Implicit GEMM convolution forward propagation (fprop) kernel
+using Conv2dFpropKernel = typename cutlass::conv::kernel::DefaultConv2dFprop<
+  cutlass::int4b_t,                                // data type of element a (mapped to activation for fprop)
+  cutlass::layout::TensorNHWC,                     // layout of element a (mapped to activation for fprop)
+  cutlass::int4b_t,                                // data type of element b (mapped to filters for fprop)
+  cutlass::layout::TensorNHWC,                     // layout of element b (mapped to filters for fprop)
+  int32_t,                                         // data type of element c (mapped to output for fprop)
+  cutlass::layout::TensorNHWC,                     // layout of element c (mapped to output for fprop)
+  int32_t,                                         // data type of internal accumulation
+  cutlass::arch::OpClassTensorOp,                  // opcode class tag
+  cutlass::arch::Sm75,                             // target SM architecture
+  cutlass::gemm::GemmShape<128, 128, 128>,         // shape of threadblock tile
+  cutlass::gemm::GemmShape<64, 64, 128>,           // shape of warp-level GEMM tile
+  cutlass::gemm::GemmShape<8, 8, 32>,              // shape of target math instruction
+  cutlass::epilogue::thread::LinearCombinationClamp<
+    int32_t,                                       // data type of output matrix
+    8,                                             // number of elements per vectorized memory access;
+                                                   // this also becomes the vector width of math
+                                                   // instructions in the epilogue
+    int32_t,                                       // data type of accumulator
+    float>,                                        // epilogue operator
+  SwizzleThreadBlock,                              // optional function to reorder threadblocks for locality
+  2,                                               // number of pipeline stages in threadblock-scoped GEMM
+  cutlass::arch::OpMultiplyAddSaturate,            // math operation on data of elements a and b
+  cutlass::conv::IteratorAlgorithm::kOptimized     // global memory iterator algorithm
+>::Kernel;
+```
+
+That is, this computes 2-D convolutional forward propagation with 4-bit integer inputs and outputs (`cutlass::int4b_t`).
+Internal accumulation is performed using 32-bit integers (`int32_t`), and an elementwise linear combination operation
+is performed on the output in single-precision floating point (`float`).
+
+The threadblock and warp-level tile shapes refer to the hierarchically blocked GEMM computation
+[described here](/media/docs/gemm_api.md).
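+
+Before it can be launched, the kernel type is wrapped in a device-level operator; this is the
+`ImplicitGemm` type referenced by the launch code in the next section. The following is a minimal
+sketch, assuming the conventions of the example source.
+
+```c++
+/// Device-level implicit GEMM convolution operator wrapping the kernel defined above
+/// (sketch; the alias name `ImplicitGemm` matches the launch code shown below)
+using ImplicitGemm = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel>;
+```
+
+The threadblock and warp tile shapes chosen above also determine how much work each CTA performs.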
Larger tiles achieve greater reuse of data loaded through shared memory +but launch fewer CTAs and may not fully occupy the GPU for small problem sizes. Smaller tile configurations achieve +lower peak utilizations but may better match the number of SMs within the GPU for real-world workloads. + + +## Launching the convolution + +The following code collects the arguments for an implicit GEMM operation into a structure. + +```c++ +// +// Define arguments for CUTLASS Convolution +// + +// mode (kCrossCorrelation or kConvolution) +cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation; + +// Split K dimension into 1 partitions +int split_k_slices = 1; + +cutlass::conv::Conv2dProblemSize problem_size( + options.input_size, + options.filter_size, + options.padding, + options.conv_stride, + options.dilation, + options.output_size(), + mode, + split_k_slices); + +typename ImplicitGemm::Arguments arguments{ + problem_size, + tensor_a.device_ref(), + tensor_b.device_ref(), + tensor_c.device_ref(), + tensor_c.device_ref(), + {options.alpha, options.beta}, +}; +``` + +The `mode` flag indicates whether to compute cross correlation or convolution. The arguments +`input_size`, `filter_size`, `padding`, `conv_stride`, and `dilation` specify the dimensions of the +input and output tensors and characterize the problem size. + +The arguments `tensor_a.device_ref()`, `tensor_b.device_ref()`, and `tensor_c.device_ref()` are +CUTLASS `TensorRef<>` objects containing a pointer to the tensor data in GPU device memory and stride values. + +The following code initializes and launches the Implicit GEMM operation on the device. After initializing +the arguments structure, it is used to query device-side workspace requirements and allocate them +in device memory if needed. + +Then, the Implicit GEMM object is initialized with the `arguments` structure and the workspace in +device memory. This initialization step precomputes internal lookup tables used by the convolution kernel +and may also clear the device-side workspace if needed. + +Finally, the initialized Implicit GEMM object is called, launching a kernel on the device. `tensor_c` now +contains the result of the implicit GEMM. + +```c++ +ImplicitGemm implicit_gemm_op; + +// Query workspace size +size_t workspace_size = implicit_gemm_op.get_workspace_size(arguments); + +// Allocate workspace memory +cutlass::device_memory::allocation workspace(workspace_size); + +// Initialize the Implicit GEMM object +cutlass::Status status = implicit_gemm_op.initialize(arguments, workspace.get()); + +if (status != cutlass::Status::kSuccess) { + /* error */ +} + +// +// Launch initialized CUTLASS kernel +// + +status = implicit_gemm_op(); + +if (status != cutlass::Status::kSuccess) { + /* error */ +} +``` + +The example demonstrates how the input and output tensors may be written to a file as CSV using +`cutlass::HostTensor<>` defined in the [CUTLASS Utilities](/media/docs/utilities.md). + +```c++ + std::ofstream output_workspace(ss.str()); + + output_workspace + << "Input = \n" << tensor_a.host_view() << "\n\n" + << "Filters = \n" << tensor_b.host_view() << "\n\n"; + + // Copy device memory to host backing store + tensor_c.sync_host(); + + output_workspace << "Computed = \n" << tensor_c.host_view() << std::endl; +``` + + +## CUTLASS Components + +CUTLASS defines the following CUDA C++ templates to implement Implicit GEMM Convolution which are described in greater detail in subsequent sections. + +**Activations tile iterators** load the activations tile into registers. 
Two implementations are provided:
+- [conv2d_fprop_activation_tile_access_iterator_analytic.h](/include/cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_analytic.h) computes pointer deltas and masks analytically
+- [conv2d_fprop_activation_tile_access_iterator_optimized.h](/include/cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_optimized.h) optimizes iteration over global memory and
+creation of the GEMM-A tile in shared memory.
+
+**Filter tile iterators** load filters into registers. Similarly, two implementations are provided:
+- [conv2d_fprop_filter_tile_access_iterator_analytic.h](/include/cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_analytic.h) computes pointer deltas and masks analytically
+- [conv2d_fprop_filter_tile_access_iterator_optimized.h](/include/cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_optimized.h) optimizes iteration over global memory and
+creation of the GEMM-B tile in shared memory.
+
+The optimized iterators apply the following improvements:
+
+a. Precomputing kernel-invariant pointer deltas on the host
+b. Computing CTA-invariant mask predicates in device-side iterator constructors
+c. Using [fast divmod](/include/cutlass/fast_math.h) to map GEMM dimensions to convolution tensor coordinates.
+
+For example, an _optimized_ activation iterator uses fast divmod to map GEMM _M_ to NPQ.
+
+**Pipelined mainloop** loads threadblock-scoped tiles from global memory into shared memory and then applies
+CUTLASS warp-level GEMM operations to load from Shared Memory and issue instructions to Turing Tensor Cores.
+- [implicit_gemm_pipelined.h](/include/cutlass/conv/threadblock/implicit_gemm_pipelined.h)
+
+Operations for storing to shared memory and performing warp-wide matrix multiply operations using
+Turing Tensor Cores are taken directly from the CUTLASS GEMM components. These include the
+following components.
+
+**Regular Tile Iterator** implemented in
+[transform::threadblock::RegularTileIterator](/include/cutlass/transform/threadblock/regular_tile_iterator.h)
+stores register-backed fragments to Shared Memory in permuted layouts.
+
+**Warp-level GEMM** defined in [cutlass::gemm::warp::MmaTensorOp](/include/cutlass/gemm/warp/mma_tensor_op.h)
+defines tile iterators to load from Shared Memory and issue math instructions to Turing Tensor Cores.
+Further details are [described here](/media/docs/gemm_api.md#warp-level-matrix-multiply-api).
+
+**Epilogue** reorders accumulator elements among threads within a threadblock to efficiently update
+the output tensor. It is implemented in [epilogue::threadblock::Epilogue](/include/cutlass/epilogue/threadblock/epilogue.h).
+
+### Loading Activations and Filters
+
+The Implicit GEMM Convolution algorithm partitions the GEMM _K_ dimension (of extent _CRS_) into
+threadblock tiles and assigns each threadblock tile to one filter position and an interval
+of channels. After iterating over all filter positions, the convolution algorithm advances to the
+next interval of channels and proceeds from filter `r=0, s=0`.
+
+The matrix product of one threadblock tile is computed per iteration of
+the mainloop as described in the [CUTLASS GEMM implementation](/media/docs/efficient_gemm.md). To
+summarize, the threadblock tiles of activations and filters are loaded from tensors in global memory
+and stored to shared memory. Each thread within the threadblock loads one or more vectors, and
+together the threads span the entire tile.
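+
+To make this traversal order concrete, the following standalone sketch (not CUTLASS code; the
+extents `R`, `S`, `C` and the 32-channel tile width are example values, and a single split-K slice
+is assumed) enumerates the order in which one threadblock visits filter positions and channel
+intervals. It mirrors the `advance()` logic shown later in this document: `s` varies fastest,
+then `r`, then the channel interval.
+
+```c++
+#include <cstdio>
+
+int main() {
+  int const R = 3, S = 3, C = 64;    // example filter extents and channel count
+  int const kChannelsPerTile = 32;   // channels covered by one threadblock tile (example value,
+                                     // assumes split_k_slices == 1)
+
+  for (int c = 0; c < C; c += kChannelsPerTile) {   // interval of channels (outermost)
+    for (int r = 0; r < R; ++r) {                   // filter row
+      for (int s = 0; s < S; ++s) {                 // filter column (fastest varying)
+        std::printf("channels [%d..%d), filter position (r=%d, s=%d)\n",
+                    c, c + kChannelsPerTile, r, s);
+      }
+    }
+  }
+  return 0;
+}
+```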
+
+The following figure illustrates one particular iteration of the Implicit GEMM mainloop. Each
+thread within the threadblock is mapped to several vectors of elements in the Activations and
+Filters tensors. Each index in the GEMM _M_ dimension corresponds to a unique _(N,P,Q)_
+index of the output tensor, and pointers may be computed based on this as well as
+filter position _(r,s)_.
+
+![ALT](/media/images/conv2d-fprop-int4.png "Convolution Forward Propagation on INT4 data.")
+
+The CUTLASS component that embodies this functionality is [Conv2dFpropActivationTileAccessIteratorAnalytic](/include/cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_analytic.h).
+Its constructor computes the mapping of GEMM _M_ to _(N, P, Q)_, and the `at()` method computes the linear offset into the Activations
+tensor for each memory access the thread is to perform. Additionally, the method `valid()` computes the validity of each access
+for each filter position, indicating whether the access falls within the bounds of the tensor or out of bounds.
+
+`operator++()` iterates over the memory accesses performed by a thread in both the contiguous and strided dimensions.
+
+```c++
+// cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_analytic.h
+
+// Update iterator to thread's next contiguous, strided memory access
+Conv2dFpropActivationTileAccessIteratorAnalytic &operator++() {
+  ++iteration_contiguous_;
+  if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
+    return *this;
+  }
+  iteration_contiguous_ = 0;
+
+  ++iteration_strided_;
+  if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
+    return *this;
+  }
+  iteration_strided_ = 0;
+
+  return *this;
+}
+```
+
+After all accesses have been visited for the current threadblock tile, `advance()` updates the pointers to the next tile.
+The offsets added to each pointer follow the traversal of filter positions, performing one of the
+following:
+- advance from filter position _(r, s, c)_ to filter position _(r, s+1, c)_
+- advance from filter position _(r, S-1, c)_ to filter position _(r+1, 0, c)_
+- advance from filter position _(R-1, S-1, c)_ to filter position _(0, 0, c+32)_
+
+The logic within the body of `advance()` computes these three updates for the activation GEMM-A tile.
+
+```c++
+// cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_analytic.h
+
+// Advance to the next access
+void advance() {
+  // moves to the next tile
+  ++filter_s_;
+  if (filter_s_ < problem_size_.S) {
+    return;
+  }
+  filter_s_ = 0;
+
+  ++filter_r_;
+  if (filter_r_ < problem_size_.R) {
+    return;
+  }
+  filter_r_ = 0;
+
+  filter_c_ += Shape::kRow * problem_size_.split_k_slices;
+}
+```
+
+Similar logic holds for [Conv2dFpropFilterTileAccessIteratorAnalytic](/include/cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_analytic.h).
+
+To reduce computational overhead in the mainloop body, the pointer offsets may be precomputed
+in host code and provided to the CUDA kernel as a lookup table in its `Params` structure.
+As shown in [Conv2dFpropActivationTileAccessIteratorOptimized](/include/cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_optimized.h),
+the logic to compute offsets from filter positions has been extracted to the `Params` constructor.
+
+```c++
+// cutlass/conv/threadblock/conv2d_params.h
+struct Conv2dFpropActivationIteratorOptimizedParams {
+  ...
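+
+  // The three entries of inc_next[] computed below are byte offsets matching the three
+  // cases of advance() listed above:
+  //   inc_next[0] - step to the next filter position s
+  //   inc_next[1] - wrap s back to zero and step to the next filter position r
+  //   inc_next[2] - wrap r and s and step to the next interval of channels
+  // conv_sign flips the direction of the filter-position strides for convolution
+  // versus cross-correlation.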
+// next S +inc_next[0] = conv_sign * (int64_t(layout.stride()[0]) * problem_size.dilation_w) * element_size_bits / 8; + +// next R +inc_next[1] = conv_sign * ( + int64_t(layout.stride()[1]) * problem_size.dilation_h + - (problem_size.S - 1) * layout.stride()[0] * problem_size.dilation_w + ) * element_size_bits / 8; + +// next C +inc_next[2] = ( + threadblock_shape.column() * problem_size.split_k_slices + - conv_sign * int64_t(problem_size.R - 1) * layout.stride()[1] * problem_size.dilation_h + - conv_sign * int64_t(problem_size.S - 1) * layout.stride()[0] * problem_size.dilation_w + ) * element_size_bits / 8; + + ... +} +``` + +This allows only a simple lookup from the _delta table_ performed in device code in `Conv2dFpropActivationTileAccessIteratorOptimized::advance()`. + +```c++ +// cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_optimized.h +CUTLASS_HOST_DEVICE +void advance() { + + int next_idx = 0; + + // moves to the next tile + ++filter_s_; + if (filter_s_ == problem_size_.S) { + filter_s_ = 0; + ++filter_r_; + + if (filter_r_ < problem_size_.R) { + next_idx = 1; + } + else { + filter_r_ = 0; + next_idx = 2; + } + } + + add_byte_offset_(params_.inc_next[next_idx]); // in addition to Conv2dFpropActivationTileAccessIteratorAnalytic::advance() + + if (next_idx == 2) { + filter_c_ += params_.filter_c_delta; + } +} + +``` + +### Making use of Tensor Cores + +Turing Tensor Cores compute matrix multiply-accumulate operations efficiently by sharing data among all +threads within a warp. The following operations are supported. + +| **Shape** | **A** | **B** | **C** | +|-----------|---------|---------|---------| +| 8x8x32 | int4b_t | int4b_t | int32_t | +| 8x8x16 | int8b_t | int8b_t | int32_t | +| 16x8x8 | half | half | half | +| 16x8x8 | half | half | float | + +Functionally, the Turing 8x8x32 matrix multiply operation distributes the _A_, _B_, and _C_ matrix across 32 +threads within a warp according to the following illustration. + +![ALT](/media/images/mma-8x8x32.png "Turing Tensor Op") + +This Tensor Core operation is accessible to the CUDA programmer via the PTX instruction +[`mma.sync`](https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-fragment-mma-8832). +CUTLASS wraps inline PTX with device-side intrinsics defined in [`cutlass/arch/mma_sm75.h`](/include/cutlass/arch/mma_sm75.h) +as in the following example. + +```c++ +unsigned A; // eight packed 4-bit integer elements +unsigned B; // eight packed 4-bit integer elements + +int C[2]; // two 32-bit integer elements +int D[2]; // two 32-bit integer elements + +asm volatile( + "mma.sync.aligned.m8n8k32.row.col.s32.s4.s4.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n" + : "=r"(D[0]), "=r"(D[1]) + : "r"(A), "r"(B), "r"(C[0]), "r"(C[1])); +``` + +To load data efficiently from Shared Memory into registers with the distribution among +warps matching the above, the Turing GPU architecture introduces +[`ldmatrix`](https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-instructions-ldmatrix). +`ldmatrix` is the ultimate warp-cooperative instruction, as all threads contribute addresses to up to 32 row vectors of +size 128-bits in length. These rows are fetched from Shared Memory and then distributed among groups of four threads +per row. + +The arrangement of SMEM pointers and destination registers within threads is illustrated as follows. Thread 0 is highlighted +in the illustration to emphasize the mapping. 
+ +![ALT](/media/images/ldmatrix-8x128bx4.png "Turing ldmatrix PTX instruction") + +The size of the Turing Tensor Core operation computing matrix multiply-accumulate on INT4 data is 8-by-8-by-32 +elements. `ldmatrix` fetches up to 32 rows (or columns) per operation. Sixteen Tensor Core operations may be issued +to implement a 32-by-32-by-32 matrix product and perfectly consume all data loaded by two `ldmatrix` instructions +as shown in the following figure. Larger tiles are possible by increasing the number of memory instructions +and issuing more Tensor Core operations, up to warp-level matrix operations of size 64-by-64-by-32. The limit is +the number of registers to hold the accumulator elements. + +![ALT](/media/images/ldmatrix-tensorop-32x32x32.png "Turing ldmatrix PTX instruction feeding Tensor Core operations") + +### Shared Memory Layouts + +In the previous two sections, we have described how data may be loaded from activations and filters tensors +in global memory to compute convolution, and we have described a composition of `ldmatrix` and `mma.sync` +to fetch data from Shared Memory and issue Tensor Core operations. + +To ensure this data movement is efficient, care must be taken to ensure bank conflicts are avoided. CUTLASS +uses a permuted Shared Memory layout to avoid bank conflicts when storing to Shared Memory and to efficiently +load from Shared Memory using `ldmatrix`. The following figure illustrates the thread mapping used for +the loading the activations and filters threadblock tiles from global memory and the permuted layout in +Shared Memory. + +![ALT](/media/images/tensor-op-permuted-smem-layout-TN.png "Shared Memory layout used for Turing Tensor Cores") + +In the illustration, one warp-wide memory access is highlighted in blue, with individual threads +loading one 128-bit vector. The tile in global memory could correspond either to the activations +or filters and is assumed to be 'strip-mined' with four threads loading consecutive channels. + +Shared Memory is visualized as a 'row-major' matrix with eight columns representing +the eight 128-bit banks. +As described in the CUTLASS GTC 2019 presentation [slides](https://developer.download.nvidia.com/video/gputechconf/gtc/2019/presentation/s9593-cutensor-high-performance-tensor-operations-in-cuda-v2.pdf), +[recording](https://developer.nvidia.com/gtc/2019/video/S9593), an access to Shared Memory will be conflict-free if +the following conditions are satisfied across each warp: +- {T0, T1, .., T7} do not access the same 128-bit bank +- {T8, T9, .., T15} do not access the same 128-bit bank +- {T16, T17, .., T23} do not access the same 128-bit bank +- {T24, T25, .., T31} do not access the same 128-bit bank + +To achieve conflict-free stores, the Shared Memory layout remaps the strip-mined arrangement to transpose +the vectors and applies an XOR operation on the column index of each thread's pointer. Specifically, + +```c++ + int store_column = (lane_id % 8) ^ (lane_id / 8); +``` + +This transformation on the layout will be instrumental in reading slices of data from Shared Memory +to compute the warp-level matrix multiply using Tensor Cores. + +The following figure shows how the first sixteen threads participating in an `ldmatrix` instruction +logically map to the c=0..31 slice of a matrix in Shared Memory. This slice is known as a "k-group" +within the code because it corresponds to the same K-index of a warp-level matrix multiply. 
+ +![ALT](/media/images/tensor-op-permuted-smem-layout-TN-k0.png "Load kgroup=0 from Shared Memory using ldmatrix") + +The lower half of the figure shows the physical arrangement in Shared Memory, with threads offset by row and column +according to the XOR function. By inspection, we can observe there are no bank conflicts, as _T0 ... T7_ each access unique +banks, as do _T8 ... T15_. and beyond. + +To advance to the next "k-group" within Shared Memory, pointers are updated using an XOR operation according to +the following sequence: +- **^1** advances from _k=0_ to _k=1_ +- **^3** advances from _k=1_ to _k=2_ +- **^1** advances from _k=2_ to _k=3_ +- **^3** advances from _k=3_ to _k=0_ + +The first of these transitions is shown below. +![ALT](/media/images/tensor-op-permuted-smem-layout-TN-k1.png "Advance to kgroup=1 from Shared Memory using ldmatrix") + +The [CUTLASS warp-level GEMM API](/media/docs/gemm_api.md#warp-level-matrix-multiply-api) defines templates for +loading slices of data from permuted Shared Memory and issuing operations to Tensor Cores. + +### Updating the Output Tensor + +After the mainloop terminates, the accumulator tile of the warp-level GEMM stores a warp's contribution to the output +tensor. However, the distribution of data among threads within the threadblock is specialized for efficient matrix multiply-accumulate +operations using Tensor Cores and is not conducive to efficient, coalesced operations to Global Memory. A data rearrangement is +needed. + +The **Epilogue** is the component for exchanging accumulator elements through Shared Memory, loading slices of the output +matrix or tensor, applying an elementwise operation such as linear scaling or bias, and storing the result to the output tensor. +CUTLASS structures this as several components: +- [cutlass::epilogue::threadblock::Epilogue](/include/cutlass/epilogue/threadblock/epilogue.h) - the top-level component for looping over the entire threadblock tile +- [cutlass::epilogue::warp::TileIteratorTensorOp](/include/cutlass/epilogue/warp/tile_iterator_tensor_op.h) - a specialized component for storing accumulators for Tensor Core to Shared Memory +- [cutlass::epilogue::threadblock::SharedLoadIterator](/include/cutlass/epilogue/threadblock/shared_load_iterator.h) - a component for loading elements from a row-major arrangement in Shared Memory +- [cutlass::epilogue::threadblock::PredicatedTileIterator](/include/cutlass/epilogue/threadblock/predicated_tile_iterator.h) - a component for loading or storing matrix fragments to Global Memory (with bounds checks) +- [cutlass::epilogue::thread::LinearCombination](/include/cutlass/epilogue/thread/linear_combination.h) - an element-wise function computing `alpha * AB + beta * C` to compute the final output + +## Unit Tests + +Unit tests verify the functional behavior of each of the above components in a standalone CUDA kernel. This provides a +convenient environment to + +a. inspect the template definition, +b. showcase instantiation of use of these templates in device code, and +c. assert functional correctness. 
+ +**Convolution unit tests** +- Device-wide convolution operator: [conv2d_fprop_implicit_gemm_s4nhwc_s4nhwc_s32nhwc_tensor_op_s32_sm75.cu](/test/unit/conv/device/conv2d_fprop_implicit_gemm_s4nhwc_s4nhwc_s32nhwc_tensor_op_s32_sm75.cu) + +**GEMM unit tests** +- Warp-scoped matrix multiply for Turing Tensor Cores: [gemm_sm75.cu](/test/unit/gemm/warp/gemm_sm75.cu) + +**Epilogue unit tests** +- Epilogue for Turing Tensor Cores: [epilogue_tensor_op.cu](/test/unit/epilogue/threadblock/epilogue_tensor_op.cu) + + +# Convolution Example + +This section describes the provided convolution example and is intended to orient the reader to the CUTLASS implementation +of Implicit GEMM Convolution. + +## Building and Running the Example + +Example `09_turing_tensorop_conv2dfprop` computes a forward convolutional layer in which inputs and +outputs are 4-b integers. The example source is visible in +[examples/09_turing_tensorop_conv2dfprop/turing_tensorop_conv2dfprop.cu](/examples/09_turing_tensorop_conv2dfprop/turing_tensorop_conv2dfprop.cu). + + +Before building the example, first perform the prerequisite steps for building any CUTLASS component [described here](/media/docs/quickstart.md). +Compute capability 7.5 refers to the Turing architecture, and this work requires CUDA 10.2 Toolkit or later to target +Turing Tensor Cores using the native `mma` [PTX instruction](https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-fragment-mma-8832). + +```bash +$ mkdir build && cd build + +$ cmake .. -DCUTLASS_NVCC_ARCHS=75 +``` + +To build the example, execute `make 09_turing_tensorop_conv2dfprop` from the build directory. +```bash +$ make 09_turing_tensorop_conv2dfprop + +$ ls examples/09_turing_tensorop_conv2dfprop +examples/09_turing_tensorop_conv2dfprop + +``` + +This example provides a simple command line interface to specify the extents of 4D tensors of 4-bit integer elements (`cutlass::int4b_t`), +initialize them to random values, and compute the result of a convolutional layer. Optionally, the input and output +tensors may be saved to .csv files, and the CUTLASS host-side reference check may be executed to verify correctness. + +The complete usage statement is visible by running with `--help`: +```bash +$ ./examples/09_turing_tensorop_conv2dfprop/09_turing_tensorop_conv2dfprop --help +09_turing_tensorop_conv2dfprop example + + This example uses Turing's Tensor Core operators on int4 data types to compute + forward convolution on tensors of layout NHWC. + +Options: + + --help If specified, displays this usage statement. + + --n Input tensor extent N + --h Input tensor extent H + --w Input tensor extent W + --c Input tensor extent C + --k Filter extent K + --r Filter extent R + --s Filter extent S + + --alpha Epilogue scalar alpha + --beta Epilogue scalar beta + + --ref-check If set (true), reference check on the host is computed + --perf-check If set (true), performance is measured. + --benchmark If set (true), performance benchmarking on several layers and batch-size. + --iterations Number of profiling iterations to perform. + --save-workspace If set, workspace is written to a text file. 
+ --tag String to replicate across the first column in the results table + + + +Examples: + +$ ./examples/09_turing_tensorop_conv2dfprop/09_turing_tensorop_conv2dfprop --n=32 --h=224 --w=224 --c=128 --k=256 --r=1 --s=1 + +$ ./examples/09_turing_tensorop_conv2dfprop/09_turing_tensorop_conv2dfprop --n=1 --h=224 --w=224 --c=32 --k=32 --r=3 --s=3 --ref-check +``` + +*Note*, this example assumes all tensors are 128b aligned and in format _NHWC_. Consequently, dimension +_C_ must be divisible by 32 for activations, filters, and output. + +If the option `--benchmark` is passed, several layers from ResNet50 are profiled for various batch sizes. +This sample output was computed on an NVIDIA RTX 2080 compiled with CUDA 10.2. + +```bash +build$ ./examples/09_turing_tensorop_conv2dfprop/09_turing_tensorop_conv2dfprop --benchmark +``` + +Convolution can also be run by the CUTLASS Profiler. + + +# Copyright + +Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +SPDX-License-Identifier: BSD-3-Clause + +``` + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +``` diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/layout.md b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/layout.md new file mode 100644 index 0000000000000000000000000000000000000000..b2d20ef14ec39f1d617d09cf0c876293abf6feef --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/layout.md @@ -0,0 +1,303 @@ +![ALT](/media/images/gemm-hierarchy-with-epilogue-no-labels.png "CUTLASS Layouts and Tensors") + +[README](/README.md#documentation) > **Layouts and Tensors** + +Note: This document talks about CUTLASS 2.x layout tag types. +CUTLASS 3.0 deprecates all legacy 2.x layout tags in favour of a single `cute::Layout` +vocabulary type for all thread and data tensors. Please refer to the +[documentation for cute layouts](cute/01_layout.md) for more details about CUTLASS 3.0's definition of "layout". + +# Layouts and Tensors + +_Tensors_ are mathematical objects represented by a multidimensional array of numeric elements in memory. 
+These may define two dimensional matrices upon which classical linear algebra computations may be defined or +higher dimensional objects frequently used to structure data used by Deep Learning applications and frameworks. + +This document describes design patterns used in CUTLASS to map logical index spaces onto memory (Layouts) and to +indirectly reference tensors in memory (TensorRef and TensorView objects). + +As described, CUTLASS adheres to the following terminology which is consistent with the C++ Standard Library. + +* *size* (scalar): number of elements in a tensor +* *capacity* (scalar): number of elements needed to represent tensor in memory (may be larger than _size_) +* *rank* (scalar): number of logical dimensions describing tensor +* *extent* (vector): size of each logical dimension in a tensor + +## CUTLASS Layout Concept + +CUTLASS Layouts are a systematic design pattern for the following: +* Mapping _logical_ index space to _physical_ offsets in memory +* Storing the dynamic state needed in the above computation +* Defining a type system for partial specialization of other CUTLASS components + +_Concept:_ layouts satisfy the following concept. +```c++ +/// CUTLASS Layout concept example +struct LayoutConcept { + + /// Logical rank of tensor + static int const kRank; + + /// Rank of stride vector + static int const kStrideRank; + + /// Index type used for coordinates + struct Index; + + /// Long index type used for offsets + struct LongIndex; + + /// Logical coordinate - satisfies Coord + struct TensorCoord; + + /// Stride object - satisfies Coord + struct Stride + + // + // Methods + // + + /// Constructor + CUTLASS_HOST_DEVICE + LayoutConcept(); + + /// Ctor + CUTLASS_HOST_DEVICE + LayoutConcept(Stride stride); + + /// Helper returns a layout to a tightly packed tensor + CUTLASS_HOST_DEVICE + static LayoutConcept packed(TensorCoord const &extent); + + /// Function call operator returns the offset of a coordinate in linear memory. + /// Assumes coordinate has convention (row, column) + CUTLASS_HOST_DEVICE + LongIndex operator()(TensorCoord const &coord) const; + + /// Inverse of layout function, mapping linear offset to logical coordinate + CUTLASS_HOST_DEVICE + TensorCoord inverse(LongIndex offset) const; + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride stride() const; + + /// Returns the stride of the layout + CUTLASS_HOST_DEVICE + Stride & stride(); + + /// Compute the number of contiguous elements needed to store a tensor with the given size + CUTLASS_HOST_DEVICE + LongIndex capacity(TensorCoord const &extent) const; +}; +``` + +_Layout_ objects generalize leading dimensions of matrices typical in _BLAS_ implementations. For example, cuBLAS assumes +Fortran-style _column-major_ layouts of matrices and refers to this as the matrix's "leading dimension." + +```c++ +cublasGemmEx( + ... + ptr_A, // pointer to first element of matrix A + lda, // leading dimension + ... +); +``` +This implies an element at coordinate (_row_, _column_) has offset `row + lda * column`. + +This is equivalently represented by CUTLASS's `layout::ColumnMajor` type as follows. +```c++ + +layout::ColumnMajor layout(lda); + +int offset = layout({row, column}); // returns row + lda * column +``` + +Other layout functions are possible such as row-major: +```c++ + +layout::RowMajor layout(lda); + +int offset = layout({row, column}); // returns lda * row + column +``` + +In both cases, the _logical_ coordinate (_row_, _column_) is represented by the same object. 
This enables an algorithm to be +implemented as generic template, with locations within tensors always specified in logical space. _Layout_ objects map this to +physical offsets in memory. + +The layout's `::packed()` static method may be used to construct a layout object given the extent of a densely packed tensor. +This method is needed when an algorithm must define a buffer of arbitrary layout. + +Example: +```c++ + +typename ArbitraryLayout::TensorCoord extent = make_Coord(...); +typename ArbitraryLayout::TensorCoord coord; + +ArbitraryLayout layout = ArbitraryLayout::packed(extent); + +int offset = layout({coord}); +``` + +The layout's `::capacity()` method computes the number of locations in memory needed to represent a tensor. This is +useful when allocating memory, as more storage may be needed than what is strictly necessary for a fully packed +tensor. + +Example: +```c++ + +int lda = columns + padding; +MatrixCoord extent{rows, columns}; + +layout::RowMajor layout(lda); + +auto capacity = layout.capacity(extent); // returns rows * (columns + padding) +``` + +## Accessing elements within a tensor + +### TensorRef + +`TensorRef` is a structure containing both a pointer to the start of a +tensor and a layout object to access its elements. This is a convenient object which may be +passed to functions to limit an explosion of arguments when the number of stride elements is +numerous. + +Example: +```c++ +int4_t *ptr = ...; +int ldm = ...; + +int row = ...; +int column = ...; + +layout::ColumnMajor layout(ldm); +TensorRef ref(ptr, layout); + +int4_t x = ref.at({row, column}); // loads a 4-bit signed integer from the tensor + +ref.at({row, column}) = x * 2_s4; // transforms this quantity and stores it back +``` + +### TensorView + +Matrices and tensors used in linear algebra computations are invariably finite. `TensorView` extends `TensorRef<>` by +adding an `extent` vector to describe the logical extent of the tensor or matrix. + +Example: +```c++ +int4_t *ptr = ...; +int ldm = ...; +MatrixCoord extent = ...; + +int row = ...; +int column = ...; + +layout::ColumnMajor layout(ldm); +TensorView view(ptr, layout, extent); + +MatrixCoord coord = {row, column}; + +if (view.contains(coord)) { // verify coordinate is in bounds before performing access + + int4_t x = ref.at(coord); + ref.at({row, column}) = x * 2_s4; +} + +``` + +A `TensorView<>` may be constructed from a `TensorRef<>` succinctly as follows: +```c++ +layout::ColumnMajor layout(ldm); +TensorRef ref(ptr, layout); + +TensorView view(ref, extent); // construct TensorView from TensorRef and extent +``` + +Note, computations avoid becoming overdetermined by accepting a single problem size component +and `TensorRef` objects for each of the operands whose extents are implied as a precondition of the operation. By avoiding +redundant storage of extent quantities, CUTLASS minimizes capacity utilization of precious resources such as constant memory. +This is consistent with BLAS conventions. 
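+
+To make the layout concept above concrete, the following host-only sketch implements a toy
+column-major layout with the same interface shape. It is illustrative only: it does not use the
+real CUTLASS coordinate types or the `CUTLASS_HOST_DEVICE` decoration, and the name
+`SimpleColumnMajor` is invented for this example.
+
+```c++
+#include <cassert>
+#include <cstdint>
+
+// Toy layout in the spirit of the LayoutConcept shown earlier (illustrative, not CUTLASS code).
+// Logical coordinates are (row, column); the single stride is the leading dimension.
+struct SimpleColumnMajor {
+  static int const kRank = 2;        // logical rank of the tensor
+  static int const kStrideRank = 1;  // one stride: the leading dimension
+
+  using Index = int32_t;
+  using LongIndex = int64_t;
+
+  struct TensorCoord { Index row, column; };
+
+  Index stride_;   // leading dimension (allocated rows)
+
+  explicit SimpleColumnMajor(Index ldm = 0) : stride_(ldm) {}
+
+  // Helper returning a layout for a tightly packed tensor of the given extent
+  static SimpleColumnMajor packed(TensorCoord const &extent) {
+    return SimpleColumnMajor(extent.row);
+  }
+
+  // Map a logical coordinate to a linear offset: row + ldm * column
+  LongIndex operator()(TensorCoord const &coord) const {
+    return LongIndex(coord.row) + LongIndex(stride_) * coord.column;
+  }
+
+  // Number of elements needed to store a tensor of the given extent
+  LongIndex capacity(TensorCoord const &extent) const {
+    return LongIndex(stride_) * extent.column;
+  }
+};
+
+int main() {
+  SimpleColumnMajor layout(/*ldm=*/12);               // a 10x8 tensor padded to 12 rows
+  assert(layout({3, 2}) == 3 + 12 * 2);               // offset of (row=3, column=2)
+  assert(layout.capacity({10, 8}) == 12 * 8);         // capacity exceeds the 10*8 logical size
+  assert(SimpleColumnMajor::packed({10, 8})({9, 0}) == 9);
+  return 0;
+}
+```
+
+A real CUTLASS layout additionally builds its `TensorCoord` and `Stride` types on `cutlass::Coord<>`
+and marks its methods `CUTLASS_HOST_DEVICE` so the same code runs on the host and the device.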
+ +# Summary: + +The design patterns described in this document form a hierarchy: +* `T *ptr;` is a pointer to a contiguous sequence of elements of type `T` +* `Layout layout;` is an object mapping an index space to a linear offset +* `TensorRef ref(ptr, layout);` is an object pointing to an _unbounded_ tensor containing elements of type `T` and a layout of type `Layout` +* `TensorView view(ref, extent);` is an object pointing to a _bounded_ tensor containing elements of type `T` and a layout of type `Layout` + +# Appendix: Existing Layouts + +This section enumerates several existing Layout types defined in CUTLASS. + +Matrix layouts: +- `PitchLinear`: data layout defined by _contiguous_ and _strided_ dimensions. _contiguous_ refers to consecutive elements in memory, where as _strided_ refers to data separated by a uniform stride +-- Rank: 2 +-- TensorCoord type: `PitchLinearCoord` +-- Shape type: `PitchLinearShape` +-- Stride rank: 1 + +- `ColumnMajor`: data layout defined by _rows_ and _columns_ dimensions. Can be mapped to `PitchLinear` by: (_contiguous_ = _rows_, _strided_ = _columns_) +-- Rank: 2 +-- TensorCoord type: `MatrixCoord` +-- Shape type: `MatrixShape` +-- Stride rank: 1 + +- `RowMajor`: data layout defined by _rows_ and _columns_ dimensions. Can be mapped to `PitchLinear` by: (_contiguous_ = _columns_, _strided_ = _rows_) +-- Rank: 2 +-- TensorCoord type: `MatrixCoord` +-- Shape type: `MatrixShape` +-- Stride rank: 1 + +- `ColumnMajorInterleaved`: data layout defined by _rows_ and _columns_ dimensions. Data is packed into a 'column-major' arrangement of row vectors of fixed length. +-- Rank: 2 +-- TensorCoord type: `MatrixCoord` +-- Shape type: `MatrixShape` +-- Stride rank: 1 + +- `RowMajorInterleaved`: data layout defined by _rows_ and _columns_ dimensions. Data is packed into a 'row-major' arrangement of column vectors of fixed length. +-- Rank: 2 +-- TensorCoord type: `MatrixCoord` +-- Shape type: `MatrixShape` +-- Stride rank: 1 + +Tensor layouts: +- `TensorNHWC`: + +Permuted Shared Memory Layouts: +- `TensorOpCongruous` +- `TensorOpCrosswise` + + +# Copyright + +Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +SPDX-License-Identifier: BSD-3-Clause + +``` + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +``` diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/pipeline.md b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/pipeline.md new file mode 100644 index 0000000000000000000000000000000000000000..1107b820acdbb37d540d031ca24cebc871082f51 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/pipeline.md @@ -0,0 +1,210 @@ +# Synchronization primitives + +## Overview of CUDA's synchronization methods + +The CUDA programming model provides 3 abstractions: + +* hierarchical parallelism -- that is, parallel threads + grouped into hierarchical units such as blocks and clusters; + +* shared memory, through which parallel threads that are + in the same hierarchical unit can communicate; and + +* synchronization methods for threads. + +These abstractions help developers extract +both fine-grained and coarse-grained parallelism, +by making it possible for them to subdivide problems +into independent components, +and to insert synchronization at appropriate points. + +Over the years CUDA has introduced several synchronization primitives +that operate at different levels of the hierarchy. +These include + +* [thread block - level](https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#synchronization-functions) synchronization (e.g., `__syncthreads()`); + +* [warp-level](https://developer.nvidia.com/blog/using-cuda-warp-level-primitives/) synchronization (e.g., `__syncwarp()`); and + +* [thread-level](https://docs.nvidia.com/cuda/cuda-c-programming-guide/#memory-fence-functions) fence operations. + +As an extension to this, starting with the Hopper architecture, CUDA added the following improvements: + +* [thread block clusters](https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#thread-block-clusters) -- + a new level in the thread hierarchy representing + a group of thread blocks that can coordinate and share data; + +* synchronization instructions for a thread block cluster and threads within a cluster scope. + +## CUTLASS's abstractions for Hopper features + +CUTLASS now includes abstractions +for the following features introduced in Hopper. + +1. Thread block cluster - level synchronization and query + [APIs](/include/cute/arch/cluster_sm90.hpp) + +2. Abstractions for new + [barrier instructions](/include/cutlass/arch/barrier.h) + which help with efficient synchronization + of threads within a thread block cluster. + +### Asynchronous pipelines + +In order to write a performant GEMM Kernel, +software pipelining is critical to hide the latency of global memory loads. +(Please refer to the +[Efficient GEMM](/media/docs/efficient_gemm.md#pipelining) document.) +Different threads or groups of threads +may have different roles in the pipeline. +Some are "producers" that load data or perform computations +to satisfy other threads' input data dependencies. 
+The same or different threads may be "consumers"
+that do other work with those input data dependencies,
+once they are satisfied.
+Starting with the Hopper architecture,
+the presence of hardware-accelerated synchronization instructions
+makes it possible for "producer" and "consumer" threads
+to communicate with each other efficiently
+about their data dependencies.
+
+Implementing a persistent GEMM algorithm calls for managing
+dozens of different kinds of asynchronously executing operations
+that synchronize using multiple barriers organized as a circular list.
+This complexity is too much for human programmers to manage by hand.
+As a result, we have developed
+[asynchronous Pipeline classes](/include/cutlass/pipeline.hpp).
+These classes help developers orchestrate a pipeline
+of asynchronous producer and consumer threads,
+without needing to worry about lower-level hardware details.
+These classes serve a similar function to the various
+[pipeline abstractions](https://nvidia.github.io/libcudacxx/extended_api/synchronization_primitives/pipeline.html)
+in libcu++.
+
+#### Pipeline methods
+
+##### Producer acquire
+
+The `producer_acquire` method is to be used by asynchronous producer threads
+before issuing other instructions associated with a particular pipeline stage
+(e.g., copy or write).
+
+This is a blocking instruction:
+it blocks further execution of the producer threads
+until the particular stage waiting to be acquired
+has been released by a consumer.
+
+We say that a pipeline at its start is "empty" if producer threads are free to produce and do not need to wait for a consumer release -- that is, if an acquire operation is expected to succeed. If the pipeline at its start is empty, then we can either skip performing producer acquire operations during the first pass through the pipeline stages, or use the `make_producer_start_state` method. The latter ensures that the acquire operation will succeed at the start of a pipeline.
+
+##### Producer commit
+
+The `producer_commit` method is to be issued by asynchronous producer threads
+after the instructions associated with a particular stage
+(e.g., shared memory writes) have completed,
+in order to notify the waiting asynchronous consumer threads.
+This is a nonblocking instruction.
+
+This API may result in a no-op in some cases,
+if the producer instructions themselves automatically update the barrier stage associated with the operation
+(e.g., TMA-based producer threads using the `PipelineTmaAsync` class).
+
+##### Consumer wait
+
+The `consumer_wait` method is to be used by consumer threads
+before consuming data from a particular pipeline stage
+which is expected to be produced by producer threads.
+
+This is a blocking instruction. That is,
+until the producer threads have committed to a particular stage,
+this instruction is expected to block further execution of consumer threads.
+
+##### Consumer release
+
+The `consumer_release` method is to be used by consumer threads
+to signal waiting producer threads that they have finished consuming data
+associated with a particular stage of the pipeline.
+This is a nonblocking instruction.
+
+#### Pipeline example
+
+```c++
+// 4-stage Pipeline
+static constexpr int NumStages = 4;
+using MainloopPipeline = typename cutlass::PipelineAsync<NumStages>;
+using PipelineState = typename cutlass::PipelineState<NumStages>;
+
+// 2 producer threads and 1 consumer thread
+typename MainloopPipeline::Params params;
+params.producer_arv_count = 2;
+params.consumer_arv_count = 1;
+MainloopPipeline pipeline(shared_storage.storage, params);
+
+// Producer threads
+if (thread_idx == 0 or thread_idx == 1) {
+  PipelineState smem_pipe_write = cutlass::make_producer_start_state<MainloopPipeline>();
+  for ( ; iter > 0; --iter) {
+    pipeline.producer_acquire(smem_pipe_write);
+
+    // Producer ops
+    // If any memory operations are involved, then we also need
+    // to guarantee that writes are completed and visible to consumer(s).
+
+    pipeline.producer_commit(smem_pipe_write);
+    ++smem_pipe_write;
+  }
+}
+else if (thread_idx == 2) {
+  PipelineState smem_pipe_read;
+  for (; iter > 0; --iter) {
+    pipeline.consumer_wait(smem_pipe_read);
+
+    // Consumer ops
+
+    pipeline.consumer_release(smem_pipe_read);
+    ++smem_pipe_read;
+  }
+}
+```
+
+In this example, we create an instance of the asynchronous pipeline class `PipelineAsync`
+and then synchronize among 3 asynchronously executing threads:
+2 producer threads and 1 consumer thread.
+
+Please note that this is a basic example.
+There are different versions possible,
+depending on what the producer and consumer threads are doing.
+Please refer to our [unit tests](/test/unit/pipeline)
+and the other [pipeline classes](/include/cutlass/pipeline.hpp)
+for more details.
+
+# Copyright
+
+Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+SPDX-License-Identifier: BSD-3-Clause
+
+```
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions are met:
+
+  1. Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+  2. Redistributions in binary form must reproduce the above copyright notice,
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.
+
+  3. Neither the name of the copyright holder nor the names of its
+  contributors may be used to endorse or promote products derived from
+  this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+  DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+  DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+  SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+  CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+  OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+``` diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/profiler.md b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/profiler.md new file mode 100644 index 0000000000000000000000000000000000000000..9e76d3709189a4787566560f88b978639b8cd50a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/profiler.md @@ -0,0 +1,584 @@ +![ALT](/media/images/gemm-hierarchy-with-epilogue-no-labels.png "CUTLASS Profiler") + +[README](/README.md#documentation) > **CUTLASS Profiler** + +# CUTLASS Profiler + +The CUTLASS Profiler is a command-line driven test and profiling environment for CUTLASS computations +defined in the CUTLASS Instance Library. The CUTLASS Profiler is capable of executing each GEMM, Sparse Gemm, +Conv2d, and Conv3d kernel. + +The CUTLASS Profiler may be compiled with: +```bash +$ make cutlass_profiler -j +``` + +To limit compilation time, only one tile size (typically 128x128) and threadblock cluster size (typically 2x1x1) is instantiated for each data type, +math instruction, and layout. To instantiate all sizes, set the following environment variable when running CMake from an +empty `build/` directory. +```bash +$ cmake .. -DCUTLASS_NVCC_ARCHS="70;75;80" -DCUTLASS_LIBRARY_KERNELS=all -DCUTLASS_UNITY_BUILD_ENABLED=ON +... +$ make cutlass_profiler -j +``` +Enabling the unity build places multiple kernel instances in one compilation unit, thereby reducing size of the compiled +binary and avoiding linker limitations on some platforms. + +The CUTLASS Profiler sources are stored in +```bash +tools/ + profiler/ +``` + +The CUTLASS Profiler usage statement may be obtained by executing `cutlass_profiler --help` and appears as follows. +```bash +CUTLASS Performance Tool +usage: + + cutlass_profiler [options] + + --help + + --mode= Cutlass profiler execution mode. + --mode=profile regular verification and profiling (default) + --mode=dry_run no kernels are launched or workspaces allocated + --mode=enumerate lists all operation kind and operations + --mode=trace executes a single device-side computation with + no other kernel launches + + --device-info Prints information on all GPUs present in the system + + --operation= CUTLASS operation to profile. + + --kernels= Filter operations by kernel names. For example, call all kernels with + ("s1688" and "nt") or ("s844" and "tn" and "align8") in their + operation name using --kernels="s1688*nt, s884*tn*align8" + + --ignore-kernels= Excludes kernels whose names match anything in this list. + +Device: + --device= CUDA Device ID + + --compute-capability= Override the compute capability. + + --llc-capacity= Capacity of last-level cache in kilobytes. If this is non-zero, + profiling phases cycle through different input tensors to induce + capacity misses in the L2. + + +Initialization: + --initialization= Enables initialization (default: true). If false, device memory is + not initialized after allocation. + + --initialization-provider= Selects initialization provider {host, device*}. (default: '*') + + --dist= Data distribution of input tensors {uniform*, gaussian, identity, sequential} + --dist=uniform,min:,max:,scale: + --dist=gaussian,mean:,stddev:,scale: + --dist=sequential,start:,delta:,scale: + --dist=identity + + --seed= Random number generator seed. Used to enforce deterministic + initialization. + + +Library: + --library-algo-mode= Indicates algorithm mode used to call libraries such as cuBLAS and cuDNN. 
+ mode={default*,matching,best} + + --library-algos= If --algorithm-mode=best, permits specifying a selection of algorithms. + + +Profiling: + --workspace-count= Number of discrete workspaces maintained to avoid cache-resident + If zero (default), the amount is chosen for each workload based on + capacity of the last-level cache. + + --profiling-iterations= Number of iterations to profile each kernel. If zero, kernels + are launched up to the profiling duration. + + --warmup-iterations= Number of iterations to execute each kernel prior to profiling. + + --sleep-duration= Number of ms to sleep between profiling periods (ms). + + --profiling-enabled= If true, profiling is actually conducted. + +Verification: + --verification-enabled= Whether to perform verification checks. + + --epsilon= Error threshold. Setting to zero (default) requires + bit-level equivalence. + + --nonzero-floor= Results whose absolute value is less than this quantity + are treated as zero for comparisons. + + --save-workspace= Specifies when to save the GEMM inputs and results to the filesystem. + --save-workspace=never never save workspace (default) + --save-workspace=incorrect save workspace for incorrect results + --save-workspace=always always save workspace + + --verification-providers= List of providers used to verify result. (default: '*') + Gemm verification-providers {cublas*} + Conv2d verification-providers {cudnn*, device*, host} + + +Report: + --append= If true, result is appended to possibly existing file. Otherwise, + any existing file is overwritten. + + --output= Path to output file for machine readable results. Operation kind and '.csv' is appended. + + --junit-output= Path to junit output file for result reporting. Operation kind and '.junit.xml' is appended. + + --report-not-run= If true, reports the status of all kernels including those that + do not satisfy the given arguments. + + --tags= Inserts leading columns in output table and uniform values for each + column. Useful for generating pivot tables. + + --verbose= Prints human-readable text to stdout. If false, nothing is written to stdout. + + +About: + --version CUTLASS 2.4.0 built on Nov 19 2020 at 11:59:00 + + +Operations: + + gemm General matrix-matrix product. D = alpha * A*B + beta * C + spgemm Structured sparse GEMM. D = alpha * A*B + beta * C + conv2d Conv2d operation. Output(Tensor4D) = alpha * Input(Tensor4D) * Filter(Tensor4D) + beta * Input(Tensor4D) + conv3d Conv3d operation. Output(Tensor5D) = alpha * Input(Tensor5D) * Filter(Tensor5D) + beta * Input(Tensor5D) + + +For details about a particular function, specify the function name with --help. + +Example: + + $ cutlass_profiler --operation=Gemm --help + + $ cutlass_profiler --operation=Conv3d --help + + $ cutlass_profiler --operation=Conv2d --help + +``` + +# GEMM + +The CUTLASS Profiler is capable of executing GEMM and Sparse GEMM problems. + +The CUTLASS Profiler can be built with cuBLAS enabled to use as a reference implementation. If CMake detects +the cuBLAS library available in the system, it is included as a dependency. This may be explicitly overridden +with CMake flag `CUTLASS_ENABLE_CUBLAS`. + +## GEMM Arguments + +The complete set of arguments available to each operation may be viewed by specifying the operation name +in addition to `--help`. The argument flags and their aliases usable for GEMM appear as follows. + +```bash +$ ./tools/profiler/cutlass_profiler --operation=gemm --help + +GEMM + + [enum] --gemm_kind Variant of GEMM (e.g. 
universal, gemm, planar_complex, planar_complex_array) + [int] --m,--problem-size::m M dimension of the GEMM problem space + [int] --n,--problem-size::n N dimension of the GEMM problem space + [int] --k,--problem-size::k K dimension of the GEMM problem space + [tensor] --A Tensor storing the A operand + [tensor] --B Tensor storing the B operand + [tensor] --C Tensor storing the C operand + [scalar] --alpha,--epilogue::alpha Epilogue scalar alpha + [scalar] --beta,--epilogue::beta Epilogue scalar beta + [enum] --split_k_mode,--split-k-mode Variant of split K mode(serial, parallel) + [int] --split_k_slices,--split-k-slices Number of partitions of K dimension + [int] --batch_count,--batch-count Number of GEMMs computed in one batch + [enum] --op_class,--opcode-class Class of math instruction (simt, tensorop, wmmatensorop, wmma). + [enum] --accum,--accumulator-type Math instruction accumulator data type + [int] --cta_m,--threadblock-shape::m Threadblock shape in the M dimension + [int] --cta_n,--threadblock-shape::n Threadblock shape in the N dimension + [int] --cta_k,--threadblock-shape::k Threadblock shape in the K dimension + [int] --cluster_m,--cluster-shape::m Cluster shape in the M dimension + [int] --cluster_n,--cluster-shape::n Cluster shape in the N dimension + [int] --cluster_k,--cluster-shape::k Cluster shape in the K dimension + [int] --stages,--threadblock-stages Number of stages of threadblock-scoped matrix multiply + [int] --warps_m,--warp-count::m Number of warps within threadblock along the M dimension + [int] --warps_n,--warp-count::n Number of warps within threadblock along the N dimension + [int] --warps_k,--warp-count::k Number of warps within threadblock along the K dimension + [int] --inst_m,--instruction-shape::m Math instruction shape in the M dimension + [int] --inst_n,--instruction-shape::n Math instruction shape in the N dimension + [int] --inst_k,--instruction-shape::k Math instruction shape in the K dimension + [int] --min_cc,--minimum-compute-capability Minimum device compute capability + [int] --max_cc,--maximum-compute-capability Maximum device compute capability + +Examples: + +Profile a particular problem size: + $ cutlass_profiler --operation=Gemm --m=1024 --n=1024 --k=128 + +Schmoo over problem size and beta: + $ cutlass_profiler --operation=Gemm --m=1024:4096:256 --n=1024:4096:256 --k=128:8192:128 --beta=0,1,2.5 + +Schmoo over accumulator types: + $ cutlass_profiler --operation=Gemm --accumulator-type=f16,f32 + +Run when A is f16 with column-major and B is any datatype with row-major (For column major, use column, col, or n. 
For row major use, row or t): + $ cutlass_profiler --operation=Gemm --A=f16:column --B=*:row + +Using various input value distribution: + $ cutlass_profiler --operation=Gemm --dist=uniform,min:0,max:3 + $ cutlass_profiler --operation=Gemm --dist=gaussian,mean:0,stddev:3 + $ cutlass_profiler --operation=Gemm --dist=sequential,start:0,delta:1 + +Run a kernel with cta tile size of 256x128x32 and save workspace if results are incorrect (note that --cta-tile::k=32 is default cta-tile size): + $ cutlass_profiler --operation=Gemm --cta_m=256 --cta_n=128 --cta_k=32 --save-workspace=incorrect + +Test your changes to gemm kernels with a quick functional test and save results in functional-test.csv: + $ cutlass_profiler --operation=Gemm \ + --m=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \ + --n=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \ + --k=8,16,32,64,128,256,288,384,504,512,520 \ + --beta=0,1,2 --profiling-iterations=1 \ + --providers=cutlass --output=functional-test.csv +``` + +The format of tensor argument is followed by `:`. The type could be `f32` as 32-bit floating point, `s8` as 8-bit signed integer, etc. The available types can be referred to the `NumericTypeID_enumerants` in [util.cu](tools/library/src/util.cu). The layout could be `row` or `column`. + +## Example CUDA Core GEMM Operation + +Example command line for profiling SGEMM kernels is as follows: +```bash +$ ./tools/profiler/cutlass_profiler --kernels=sgemm --m=3456 --n=4096 --k=4096 + + + +============================= + Problem ID: 1 + + Provider: CUTLASS + OperationKind: gemm + Operation: cutlass_simt_sgemm_128x128_8x2_nn_align1 + + Status: Success + Verification: ON + Disposition: Passed + + cuBLAS: Passed + + Arguments: --m=3456 --n=4096 --k=4096 --A=f32:column --B=f32:column --C=f32:column --alpha=1 --beta=0 --split_k_slices=1 \ + --batch_count=1 --op_class=simt --accum=f32 --cta_m=128 --cta_n=128 --cta_k=8 --stages=2 --warps_m=4 \ + --warps_n=2 --warps_k=1 --inst_m=1 --inst_n=1 --inst_k=1 --min_cc=50 --max_cc=1024 + + Bytes: 180355072 bytes + FLOPs: 115992428544 flops + + Runtime: 6.73655 ms + Memory: 24.934 GiB/s + + Math: 17218.4 GFLOP/s +``` + +Note, the arguments which appear in the output may be used as command line parameters for subsequent invocations. + + +## Example Tensor Core GEMM Operations + +To execute kernels targeting Tensor Core operations, supply the flag `--op_class=tensorop` in the command line. +```bash +$ ./tools/profiler/cutlass_profiler --op_class=tensorop --m=3456 --n=4096 --k=8192 + + + +============================= + Problem ID: 1 + + Provider: CUTLASS + OperationKind: gemm + Operation: cutlass_tensorop_s16816gemm_f16_256x128_32x3_nn_align8 + + Status: Success + Verification: ON + Disposition: Passed + + cuBLAS: Passed + + Arguments: --m=3456 --n=4096 --k=8192 --A=f16:column --B=f16:column --C=f32:column --alpha=1 --beta=0 --split_k_slices=1 \ + --batch_count=1 --op_class=tensorop --accum=f32 --cta_m=256 --cta_n=128 --cta_k=32 --stages=3 --warps_m=4 \ + --warps_n=2 --warps_k=1 --inst_m=16 --inst_n=8 --inst_k=16 --min_cc=80 --max_cc=1024 + + Bytes: 180355072 bytes + FLOPs: 231956545536 flops + + Runtime: 0.98647 ms + Memory: 170.272 GiB/s + + Math: 235138 GFLOP/s +``` + +## Covering the problem space + +All arguments may have single values or comma-delimited set of values. Integers may also be specified +as an inclusive range with the following syntax `start:end:increment` or simply `start:end`. 
+
+For example, the following sweeps over the range of the GEMM K dimension from 8 to 4096 in increments
+of 8 elements.
+
+```bash
+$ ./tools/profiler/cutlass_profiler --kernels=cutlass_simt_sgemm_128x128_nn --m=4352 --n=4096 --k=8:4096:8
+```
+
+## Output
+
+By default, runtime and computed GFLOP/s are reported for each operation and problem size. Additionally,
+a table of comma-separated values is reported at the end of the execution. This may be output to a file
+with the `--output=` command line option as shown:
+
+```bash
+$ ./tools/profiler/cutlass_profiler --kernels=cutlass_simt_sgemm_128x128_nn \
+    --m=3456 --n=4096 --k=8:4096:8 --output=report.csv
+```
+
+To facilitate generation of pivot tables and charts, additional columns may be prepended with the
+`--tags=<column>:<value>` option. One or more tags may be specified using a comma-delimited list.
+
+```bash
+$ ./tools/profiler/cutlass_profiler --kernels=cutlass_simt_sgemm_128x128_nn \
+    --m=3456 --n=4096 --k=8:4096:8 --output=report.csv \
+    --tags=cutlass:2.2,date:2020-06-08
+```
+
+## CUTLASS 3.0 GEMM procedural names
+
+CUTLASS 3.0 introduces a new naming convention for GEMMs used by the profiler targeting the NVIDIA
+Hopper architecture and beyond so as to indicate new features of the kernel within the name
+(e.g., the cluster shape).
+
+To best illustrate this naming convention, we will walk through the meaning of each of the components
+in a GEMM kernel used by the profiler:
+
+```
+cutlass3x_sm90_tensorop_s64x128x16gemm_f16_f16_f32_f16_f32_128x128x64_2x1x1_0_ntn_align8
+```
+
+The components within this name are as follows:
+
+* `cutlass3x`: indicates that the kernel was generated through the CUTLASS 3.0 API
+* `sm90`: indicates that the kernel targets NVIDIA GPUs with compute capability 90
+* `tensorop`: indicates that the kernel makes use of NVIDIA Tensor Cores
+(as opposed to `simt`, which indicates the use of "CUDA cores")
+* `s`: indicates that the Tensor Core instruction being used accumulates in single precision
+(as opposed to `h`, which indicates half precision)
+* `64x128x16gemm`: indicates that the shape of the Tensor Core instruction being used (MxNxK) is 64x128x16
+* `f16_f16_f32_f16_f32`: indicates the data types of operands A, B, the accumulator, C, and D (in that order)
+* `128x128x64`: indicates that the thread block shape used in the GEMM (MxNxK) is 128x128x64
+* `2x1x1`: indicates that the cluster shape being used is 2x1x1
+* `0`: indicates that the kernel uses the CollectiveBuilder's automatic stage calculation to determine the
+number of pipeline stages in the kernel. Note that `0` does not mean that no stages are used. A nonzero value means that automatic stage calculation is not performed and specifies the number of pipeline stages to be used.
+This `0` is only added to the kernel's procedural name; the profiler will still report the actual stage count
+when printing the kernel argument details (`--stages=N`), and kernel discovery will still support filtering through the `--stages` argument.
+* `ntn`: indicates that the layouts for operands A, B, and C are column major ("n"; non-transposed),
+row major ("t"; transposed), and column major, respectively.
+* `align8`: indicates that the maximum alignment between operands A and B is 8.
+
+Note that in some special cases where the input A/B types do not match those of the MMA
+instruction, the MMA-facing input type is added to the instruction string as well.
+ +``` +cutlass3x_sm90_tensorop_s64x128x8tf32gemm_f32_f32_f32_f32_f32_128x128x32_2x1x1_0_tnn_align4 +``` + +* `s64x128x8tf32gemm`: indicates that the MMA consumes inputs in `tf32` format, and therefore +the kernel performs rounding of the `f32` values in global memory while loading them into shared memory. + +For custom mainloop or epilogue schedules, details of the opted-in schedule are appended to the end of the +kernel name. For example, + +``` +cutlass3x_sm90_tensorop_h64x128x16gemm_f16_f16_f16_void_f16_128x128x64_1x1x1_0_nnn_align8_warpspecialized_cooperative_epi_tma +``` + +* `warpspecialized_cooperative`: Mainloop employs a persistent warp-specialized mainloop and kernel schedule. +* `epi_tma`: Kernel epilogue employs TMA based vectorization. +* `f16_f16_f16_void_f16`: In this case, C type is set to `void`, indicating that residual matrix support +is disabled. + +# Convolution + +The CUTLASS Profiler is capable of executing 2-D and 3-D convolution problems for forwards and backwards +operator variants. + +The CUTLASS Profiler can be built with cuDNN enabled to use as a reference implementation. If CMake detects +the cuDNN library available in the system, it is included as a dependency. This may be explicitly overridden +with CMake flag `CUTLASS_ENABLE_CUDNN`. + +```bash +$ cmake .. -DCUTLASS_LIBRARY_OPERATIONS=conv2d -DCUTLASS_ENABLE_CUDNN=OFF +... +$ make -j16 cutlass_profiler +``` + + +## Convolution Arguments + +```bash +$ ./tools/profiler/cutlass_profiler --help --operation=Conv2d + +Conv2d + + [enum] --conv_kind Convolutional operator (fprop, dgrad, wgrad) + [int] --n,--input_n Input N dimension of the Conv2d problem space + [int] --h,--input_h Input H dimension of the Conv2d problem space + [int] --w,--input_w Input W dimension of the Conv2d problem space + [int] --c,--input_c Input C dimension of the Conv2d problem space + [int] --k,--filter_k Filter K dimension of the Conv2d problem space + [int] --r,--filter_r Filter R dimension of the Conv2d problem space + [int] --s,--filter_s Filter S dimension of the Conv2d problem space + [int] --p,--output_p Output P dimension of the Conv2d problem space + [int] --q,--output_q Output Q dimension of the Conv2d problem space + [int] --g,--groups Number of convolution groups + [int] --pad_h Padding in H direction + [int] --pad_w Padding in W direction + [int] --stride_h Stride in H direction + [int] --stride_w Stride in W direction + [int] --dilation_h Dilation in H direction + [int] --dilation_w Dilation in W direction + [tensor] --Activation Tensor storing the Activation operand + [tensor] --Filter Tensor storing the Filter operand + [tensor] --Output Tensor storing the Output operand + [enum] --conv_mode Convolution filter mode (conv, cross) + [enum] --iterator_algorithm,--iterator_algo Convolution iterator algorithm (analytic, optimized) + [scalar] --alpha,--epilogue::alpha Epilogue scalar alpha + [scalar] --beta,--epilogue::beta Epilogue scalar beta + [enum] --split_k_mode,--split-k-mode SplitK mode for serial or parallel reduction (serial, parallel) + [int] --split_k_slices,--split-k-slices Number of partitions of K dimension + [enum] --eq_gemm_provider,--eq-gemm-provider Enable profiling equivalent gemm by the following providers (cutlass) + [enum] --op_class,--opcode-class Class of math instruction (simt, tensorop, wmmatensorop, wmma) + [enum] --accum,--accumulator-type Math instruction accumulator data type + [int] --cta_m,--threadblock-shape::m Threadblock shape in the M dimension + [int] --cta_n,--threadblock-shape::n 
Threadblock shape in the N dimension + [int] --cta_k,--threadblock-shape::k Threadblock shape in the K dimension + [int] --cluster_m,--cluster-shape::m Cluster shape in the M dimension + [int] --cluster_n,--cluster-shape::n Cluster shape in the N dimension + [int] --cluster_k,--cluster-shape::k Cluster shape in the K dimension + [int] --stages,--threadblock-stages Number of stages of threadblock-scoped matrix multiply + [int] --warps_m,--warp-count::m Number of warps within threadblock along the M dimension + [int] --warps_n,--warp-count::n Number of warps within threadblock along the N dimension + [int] --warps_k,--warp-count::k Number of warps within threadblock along the K dimension + [int] --inst_m,--instruction-shape::m Math instruction shape in the M dimension + [int] --inst_n,--instruction-shape::n Math instruction shape in the N dimension + [int] --inst_k,--instruction-shape::k Math instruction shape in the K dimension + [int] --min_cc,--minimum-compute-capability Minimum device compute capability + [int] --max_cc,--maximum-compute-capability Maximum device compute capability + +Examples: + +Profile a particular convolution (specify all the convolution parameters): + $ cutlass_profiler --operation=Conv2d --Activation=f16:nhwc --Filter=f16:nhwc --Output=f16 --accumulator-type=f32 --n=32 --h=14 --w=14 --c=8 --k=64 --r=3 --s=3 --pad_h=1 --pad_w=1 --stride_h=1 --stride_w=1 --dilation_h=1 --dilation_w=1 + +``` + +## Example CUDA Core Convolution Operation + +Example command line for profiling forward propagation convolution kernels on CUDA cores is as follows: +```bash +$ ./tools/profiler/cutlass_profiler --kernels=simt_sfprop --verification-providers=device --n=8 --h=224 --w=224 --c=128 --k=128 --r=3 --s=3 + + +============================= + Problem ID: 1 + + Provider: CUTLASS + OperationKind: conv2d + Operation: cutlass_simt_sfprop_optimized_128x128_8x2_nhwc + + Status: Success + Verification: ON + Disposition: Passed + +reference_device: Passed + + Arguments: --conv_kind=fprop --n=8 --h=224 --w=224 --c=128 --k=128 --r=3 --s=3 --p=224 --q=224 --pad_h=1 --pad_w=1 \ + --stride_h=1 --stride_w=1 --dilation_h=1 --dilation_w=1 --Activation=f32:nhwc --Filter=f32:nhwc --Output=f32:nhwc \ + --conv_mode=cross --iterator_algorithm=optimized --alpha=1 --beta=0 --split_k_mode=serial --split_k_slices=1 \ + --eq_gemm_provider=none --op_class=simt --accum=f32 --cta_m=128 --cta_n=128 --cta_k=8 --stages=2 --warps_m=4 \ + --warps_n=2 --warps_k=1 --inst_m=1 --inst_n=1 --inst_k=1 --min_cc=50 --max_cc=1024 + + Bytes: 2055798784 bytes + FLOPs: 118482796544 flops + + Runtime: 8.13237 ms + Memory: 235.431 GiB/s + + Math: 14569.3 GFLOP/s + +``` + +## Example Tensor Core Convolution Operation + +Example command line for profiling forward propagation convolution kernels runing on Tensor Cores is as follows: +```bash +$ ./tools/profiler/cutlass_profiler --kernels=tensorop*fprop --verification-providers=device --n=8 --h=224 --w=224 --c=128 --k=128 --r=3 --s=3 + + + +============================= + Problem ID: 1 + + Provider: CUTLASS + OperationKind: conv2d + Operation: cutlass_tensorop_s16816fprop_optimized_f16_128x128_64x4_nhwc + + Status: Success + Verification: ON + Disposition: Passed + +reference_device: Passed + + Arguments: --conv_kind=fprop --n=8 --h=224 --w=224 --c=128 --k=128 --r=3 --s=3 --p=224 --q=224 --pad_h=1 --pad_w=1 \ + --stride_h=1 --stride_w=1 --dilation_h=1 --dilation_w=1 --Activation=f16:nhwc --Filter=f16:nhwc --Output=f32:nhwc \ + --conv_mode=cross --iterator_algorithm=optimized --alpha=1 
--beta=0 --split_k_mode=serial --split_k_slices=1 \ + --eq_gemm_provider=none --op_class=tensorop --accum=f32 --cta_m=128 --cta_n=128 --cta_k=64 --stages=4 \ + --warps_m=2 --warps_n=2 --warps_k=1 --inst_m=16 --inst_n=8 --inst_k=16 --min_cc=80 --max_cc=1024 + + Bytes: 1130659840 bytes + FLOPs: 118482796544 flops + + Runtime: 0.945071 ms + Memory: 1114.21 GiB/s + + Math: 125369 GFLOP/s + + +``` + +# Copyright + +Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +SPDX-License-Identifier: BSD-3-Clause + +``` + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +``` diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/programming_guidelines.md b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/programming_guidelines.md new file mode 100644 index 0000000000000000000000000000000000000000..4be52bf57d52711417954361566699cf8ebeabb8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/programming_guidelines.md @@ -0,0 +1,866 @@ +![ALT](/media/images/gemm-hierarchy-with-epilogue-no-labels.png "CUTLASS Programming Guidelines") + +[README](/README.md#documentation) > **Programming Guidelines** + +# Programming Guidelines + +## Hierarchical Organization + +The [CUTLASS 3.0 GEMM API](./gemm_api_3x.md) document +explains CUTLASS 3.0's hierarchical organization, +based conceptually on parallelization strategy. +This differs from CUTLASS 2.x's approach, +which more closely mirrors the GPU hardware hierarchy +of thread blocks, warps, and threads. + +## Design Patterns + +CUTLASS aims for the highest performance possible on NVIDIA GPUs. +It also offers flexible components that can be assembled and customized +to solve new problems related to deep learning and linear algebra. +Given a tradeoff between simplicity and performance, +CUTLASS chooses performance. +Consequently, several design patterns are necessary +to yield a composable structure +while also satisfying these performance objectives. 
+
+### Templates
+
+CUDA C++ templates and modern generic programming techniques enable CUTLASS device code to span a large design space.
+
+This design space includes:
+* Mixed precision arithmetic and data storage
+* Kernels specialized for layout and problem size
+* Support for kernel fusion
+
+Moreover, templates provide a structured approach to collecting compile-time constants such as tile dimensions. These
+must be template arguments to target static array allocation and take advantage of loop unrolling, constant folding,
+and function inlining.
+
+### Constant Memory
+
+Several CUTLASS template classes exhibit a pattern in which problem-specific internal state is known at kernel
+launch time and remains invariant throughout the execution of a kernel. For example, tile iterators compute several
+offsets based on the strides of the input tensor that are added to an internal pointer when loading the elements
+of a tile. These are computed from the tensor stride and never updated; the per-thread internal state consists
+only of the internal global memory pointer.
+
+CUTLASS can take advantage of this CUDA grid-invariant property by constructing the object in host code and passing
+a composed parameters structure to the kernel. This confers two benefits: (1.) invariant state is held in constant
+memory, and (2.) there is no overhead to compute the initial state by each thread.
+
+The design pattern in CUTLASS is for classes with nontrivial constructors to define `struct Params` as an inner class
+which contains grid-invariant state. These should define a constructor and an `initialize()` method. The `Params`
+structure should also include a data member corresponding to each data member in the parent class, so these too can
+be properly constructed in host code. The parent class should define a constructor which accepts `Params const &` as
+its first argument.
+
+### Composable Shared Memory
+
+Shared memory requires explicit effort by the programmer to allocate and de-allocate. CUTLASS follows the paradigm
+introduced by [CUB](https://nvlabs.github.io/cub/) to define composed structures for storing data intended to be held
+in shared memory. Any object requiring shared memory storage for itself or its data members should define a child
+structure called `SharedStorage`. This holds data needed by the class and also instantiates `SharedStorage`
+objects for each data member.
+
+To be consistent, this pattern defines a convention in which classes define internal shared memory storage requirements.
+Classes should consider all SharedStorage structures to be opaque other than their own child class. When the lifetimes
+of child objects are known to be non-overlapping, `union`s may be used to alias multiple SharedStorage objects to the same
+shared memory region and reduce overall shared memory capacity. Developers should carefully note that C++ `union` rules
+require that they only access the most recently written ("active") member of the `union`; this differs from C rules.
+
+### Loop Unrolling
+
+CUTLASS requires tiles of data to be stored in registers for high-bandwidth access. Simultaneously, high-throughput math instructions
+must be issued concurrently with memory instructions to hide latency with relatively few concurrent threads. These objectives are
+achieved by unrolling loops whose iteration counts are known at compile time.
+
+Consequently, most loops within the CUTLASS GEMM implementation are specified by constant values and template arguments. The CUDA compiler
is able to unroll the loop bodies, map array elements to registers, and construct an efficient instruction schedule.
+
+All loops expected to be unrolled should be annotated with `CUTLASS_PRAGMA_UNROLL` to explicitly direct the compiler
+to unroll them.
+
+```c++
+int const kN = 8;
+Array<float, kN> x;                      // Array we would like to store in registers
+
+CUTLASS_PRAGMA_UNROLL                    // Directs the CUDA compiler to unroll this loop.
+for (int idx = 0; idx < kN; ++idx) {     // Loop has constant number of iterations.
+
+  x[idx] = float(idx);                   // Indirect access by induction variable results in
+                                         // direct register access.
+}
+```
+
+## Style
+
+### No automatic code formatting
+
+Do not use any kind of automatic code formatting,
+like `clang-format`, on CUTLASS code.
+
+### C++ style
+
+#### CUTLASS is a C++ project
+
+CUTLASS is a C++ project. CUDA C++ is a C++ dialect.
+Therefore, we write using standard C++ idioms as much as possible.
+We aim for portability to as many compilers as possible,
+by writing host code in Standard C++
+and device code in CUDA C++
+that resembles Standard C++ as much as possible.
+This improves usability
+for the general community of C++ developers,
+and makes it easier for new staff to join the project.
+
+#### Follow Standard C++ idioms where possible
+
+Regarding "standard C++ idioms,"
+CUTLASS source code follows the following guidelines,
+with deviations only because of compiler limitations
+or where performance absolutely requires it.
+"Performance requires it" implies measurement.
+Deviations should be limited in scope
+and we should always strive to eliminate them.
+
+* [C++ Core Guidelines](https://github.com/isocpp/CppCoreGuidelines/blob/master/CppCoreGuidelines.md)
+
+* [Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html)
+
+#### Spacing and line length
+
+* Use spaces, not tabs.
+
+* Use 2 spaces to indent.
+
+* Max 100 characters per line.
+
+Lines longer than 100 characters typically wrap unfavorably
+when viewed in GitHub's pretty printer.
+
+#### Function indentation
+
+When calling a function or function object with a long name,
+break the line right after the invoking open parenthesis.
+Here is an example.
+
+```c++
+detail::very_long_function_object_name{}(
+  params.long_parameter_name, some_operator.another_long_function_name());
+```
+
+When declaring functions, indent function parameters like this.
+
+```c++
+void possibly_an_unusually_long_function_name(
+    std::uint32_t foo,
+    std::uint32_t const* bar,
+    TypeA a,
+    TypeB b,
+    TypeC c) {
+  // ... the function's body ...
+}
+```
+
+A newline should not be inserted between the parenthesis
+that closes the function's parameters and the curly bracket
+that opens the function's body. Note the double indent for function parameters.
+
+#### If-else brackets and spacing
+
+* Always use braces with conditionals such as `if`.
+
+* Use a space after control flow keywords
+  such as `if`, `for`, and `while`.
+
+* Use a space after the parenthesis closing a conditional
+  such as `if`, and the curly bracket opening a scope.
+
+* Use a new line between the closing brace
+  of an `if` branch, and the `else` keyword.
+
+```c++
+if (condition) {
+  // ... code ...
+}
+else {
+  // ... other code ...
+}
+
+for (int k = 0; k < num_iters; ++k) {
+  // ... still more code ...
+}
+```
+
+#### East const
+
+CUTLASS uses the
+["East const"](http://slashslash.info/2018/02/a-foolish-consistency/)
+convention.
+That is, the `const` or `constexpr` keyword
+goes after the type, not before.
+The general rule is that `const` or `constexpr`
+modifies the type to the left of it.
+Here are some examples.
+
+```c++
+float constexpr compile_time_constant = 42.3f;
+
+float const const_float = /* whatever */;
+float const& reference_to_const_float = const_float;
+float const* pointer_to_const_float = &const_float;
+float const* const const_pointer_to_const_float = &const_float;
+
+float nonconst_float;
+float& reference_to_nonconst_float = nonconst_float;
+float* pointer_to_nonconst_float = &nonconst_float;
+float* const const_pointer_to_nonconst_float = &nonconst_float;
+```
+
+Contrast this with "West const" style, e.g.,
+
+```c++
+const float const_float = /* whatever */;
+const float* pointer_to_const_float = &const_float;
+```
+
+#### Alignment of reference and pointer types
+
+For reference and pointer types,
+align the `&` resp. `*` flush against the type
+that it modifies. This is called "left alignment."
+
+For example, do this:
+
+```c++
+int const& var;
+int const* var;
+```
+
+and not this.
+
+```c++
+int const &var;
+int const *var;
+```
+
+#### Avoid calling functions "fast" or "optimized"
+
+Putting words like "fast" or "optimized"
+in the name of a function
+assumes that the "fast" path is actually faster.
+That might be true now, but later changes
+(in the code, compilers, or GPU hardware)
+might make it false. In that case,
+your name could be unintentionally misleading.
+Consider instead a name that briefly describes
+the algorithm or feature that is relevant for optimization.
+For example, `compute_on_host` is more meaningful
+than `compute_slowly`, and computing on host
+might be faster in some cases
+(e.g., if the data are already on host
+and the algorithm is not GPU-friendly).
+
+CUTLASS code has not always followed this rule in the past.
+Some functions and classes might have words like "fast" in their name.
+New code should follow this rule, however.
+
+#### Avoid creating unconstrained templated functions with common names
+
+See [C++ Core Guidelines T.47](https://isocpp.github.io/CppCoreGuidelines/CppCoreGuidelines#t47-avoid-highly-visible-unconstrained-templates-with-common-names):
+"Avoid highly visible unconstrained templates
+with common names."
+Argument-dependent lookup (ADL) means that
+if users call a function name without specifying the namespace,
+the compiler can find overloads
+of that function in any namespace.
+This can lead to ambiguous overloads in users' code,
+just because they happened to include one of your header files
+that exposes an unconstrained function template.
+The following illustrates this
+with an unconstrained swap overload in the `cutlass` namespace.
+
+```c++
+#include <cassert>
+#include <memory>
+#include <utility>
+
+// Uncomment the line below to observe unwarranted build errors.
+//#define BAD_CUTLASS_SWAP 1
+
+namespace cutlass {
+struct Bar {
+  float f;
+};
+} // namespace cutlass
+
+#ifdef BAD_CUTLASS_SWAP
+namespace cutlass {
+
+// don't do this
+template<class T>
+void swap(T& a, T& b) {
+  T tmp = a;
+  a = b;
+  b = tmp;
+}
+
+} // namespace cutlass
+#endif // BAD_CUTLASS_SWAP
+
+namespace other {
+
+#ifdef BAD_CUTLASS_SWAP
+using cutlass::swap;
+#endif // BAD_CUTLASS_SWAP
+
+// Imagine for the sake of this example
+// that "foo" is a less common name,
+// and that T is constrained via
+// std::enable_if or a requires clause.
+template<class T>
+void foo(T& a, T& b) {
+  // The usual idiom for using std::swap is the "swap two-step":
+  //
+  // 1. import std::swap into the current scope, then
+  // 2. call swap without namespace qualification.
+ // + // That won't build if we have another swap + // overload available in the scope already. + + using std::swap; + swap(a, b); // OBSERVE UNWARRANTED BUILD ERROR HERE +} + +} // namespace other + +int main() { + int x = 42; + int y = 43; + other::foo(x, y); + assert(x == 43); + assert(y == 42); + + cutlass::Bar a{42.0}; + cutlass::Bar b{43.0}; + other::foo(a, b); + assert(a.f == 43.0); + assert(b.f == 42.0); + + // GCC 7.5 std::unique_ptr::reset calls swap, + // leading to the same issue as above. + // GCC 12.2's implementation of std::unique_ptr + // does not have this issue. Nevertheless, + // breaking the swap two-step will break users' code, + // just by them happening to include your headers. + auto ptr = std::make_unique(cutlass::Bar{666.0f}); + ptr.reset(new cutlass::Bar{777.0f}); // OBSERVE UNWARRANTED BUILD ERROR HERE + + return 0; +} +``` + +#### Function return values and in-out parameters + +##### Prefer return values to output parameters + +In general, avoid in-out mutable references to return a value. +If you need to return multiple values, +you can return them by `struct` or `tuple`, +rather than by output references. +This includes the special case of error reporting +by returning either a value or an error code. +Please see the next section for details. + +```c++ +// Instead of passing in-out mutable references ... +void not_preferred(float& input_and_output); // not preferred + +// keep functions pure and return value types instead +float preferred(float input); // preferred +``` + +##### Return multiple values by struct or tuple + +Sometimes a function needs to return multiple values. In that case, consider the following, in decreasing order of preference. + +1. Return a `struct`. This lets you name the fields + (for more self-documenting code), + yet still permits use of structured binding. + +2. Return a `tuple`. If you need a tuple type + that works on device, use `cute::tuple`. + (Please note that `cute::tuple` does not work + for all the types that work in `std::tuple`. + CuTe's documentation explains.) + +Here is an example of the struct approach for named values. +For a comparable example in the C++ Standard, +please see [`std::allocate_at_least`](https://en.cppreference.com/w/cpp/memory/allocate_at_least), +which returns `std::allocation_result`. + +```c++ +struct my_computation_result { + float value = 0.0f; + float relative_error = 0.0f; + bool success = false; +}; + +my_computation_result my_computation(float tolerance); + +void foo(float tolerance) { + // Approach 1: Use structured binding. The names + // you choose on the left-hand side have nothing + // to do with the struct, so it's up to you + // to get the order right. On the other hand, + // this code works whether my_computation returns + // a struct or a tuple. + auto [val, rel_err, ok] = my_computation(tolerance); + + // Approach 2: Keep the struct and use its named fields. + // This approach prevents errors like mixing the order of return types. + // However, it only works for structs, not for tuples. + + auto result = my_computation(tolerance); + if (not result.success) { + // computation did not succeed + } + else if (result.relative_error > tolerance) { + // successful but relative error too large + } + else { + // successful and relative error is in bounds + } +} +``` + +##### Reporting errors from a function that returns one or more values + +We may want to return one or more values +from a function that could fail +or otherwise report errors. 
+That is, the function either + +* returns one or more valid values, or + +* does not return any values and reports an error, + +but NOT BOTH. We contrast this with cases +when it's meaningful to report both a result +and whether the result is satisfactory. +For example, when solving +a system of nonlinear equations iteratively, +users may want the approximate computed solution, +even if the iteration did not succeed +by converging to the desired tolerance +in the desired number of steps. +(Users may want to invest more steps, +or use the current approximation +to jump-start a different algorithm.) + +We're talking here about the "either valid value(s), +or error, but not both" case. +For this case, C++ offers a few options. + +1. Return the value(s), or throw an exception on error + +2. `std::expected` (requiring C++23) or something like it + +3. `std::optional` (for a Boolean error state) + or something like it + +4. `std::variant` (a C++17 fall-back for `std::expected`) + or something like it + +5. C-style interface: return an error code, + and "return" the values as output parameters + +We usually cannot or do not want to +throw exceptions on device. +Some code projects forbid exceptions entirely +(on host or device) +and tell the compiler to disable them. +If we exclude a C-style interface (the last option) +as not idiomatic C++, then for host-only code, +`std::expected`, `std::optional`, and `std::variant` +all work. +For code that needs to build and run on device, +we can fall back to libcu++ equivalents +in the `cuda::std::` namespace, when they exist. +Otherwise, we must resort to returning a struct or tuple +with the value and the error information, +and ask users not to use the value on error. +This is acceptable if the value can be constructed +cheaply with a reasonable default. + +##### Performance of different value-or-error reporting methods + +[P1886R0](https://wg21.link/P1886R0) +(Ben Craig, "Error speed benchmarking") +surveys different ways in Standard C++ +to report errors from a function +that returns one or more values, +and compares their (host-only) performance +with different compilers. + +##### Use aggregate initialization when returning a struct or tuple + +Use aggregate initialization when returning a struct or tuple. +This avoids duplication of the return type name. + +```c++ +struct foo_result { + float value = 0.0f; + float error = 0.0f; + bool success = false; +}; + +foo_result foo(std::span input) { + // ... code ... + + // Prefer this. We know what type the function returns. + return {val, err, ok}; // prefer this + + // Naming foo_result again here is unnecessary. + // return foo_result{val, err, ok}; +} +``` + +However, note that this won't work if the function returns `auto`. +The general rule is to avoid code duplication. + +```c++ +auto foo(std::span input) { + // ... code ... + + if constexpr (some_condition) { + return foo_result{val, err, ok}; + } + else { + return bar_result{val, err, ok}; + } +} +``` + +##### Prefer using the actual return type to auto, if you know the type + +C++ lets you use `auto` to deduce the type returned from a function. + +* If you know the actual type, prefer using the type instead of `auto`. + +* Use [Constructor Type Argument Deduction](https://en.cppreference.com/w/cpp/language/class_template_argument_deduction) + (CTAD) if you know that a function returns some type + (e.g., `Tensor`), but don't know the type's template arguments. + +* Use `auto` in structured bindings (where you have to use it anyway). 
This also makes your code agnostic of whether the return type is a `struct`, `tuple`, `pair`, or other tuple-like type. + +* Be careful using `auto` with types that provide expression templates. + +Contrast this with "Almost Always Auto" (AAA) style. +We deliberately choose not to follow AAA style, +for the following reasons. + +* Using the actual type when we know it can help prevent common loss-of-precision errors in mixed-precision computations, an important use case for CUTLASS. + +* CTAD gives us much of the brevity of AAA, with more clarity. + +* Using the actual type instead of `auto` can prevent common dangling errors with expression templates. + +#### Classes and structs + +Type names use `CamelCase`. +That is, words start with capital letters. +The remaining letters in the word are lower case, +and words are joined with no intervening underscores. +The only exception is when implementations are +a drop-in replacement for C++ Standard Library components. + +Follow the +[C++ Core Guidelines](https://github.com/isocpp/CppCoreGuidelines/blob/master/CppCoreGuidelines.md#Rc-struct) +to decide whether to use `class` or `struct`. + +* Use `class` when the object must maintain an invariant. + Data members related to the invariant should be `private`. + +* Use `struct` when the class has no invariant to maintain, + and data members may vary arbitrarily with respect to each other. + +Prefer nonmember functions and statelessness where possible. +Member functions imply invariants. +More invariants make code maintenance and testing harder. + +#### Class members + +Methods and members are written using `snake_case`. + +Private data and function members have suffix `_`. + +#### Class Member Order + +Members within classes and structures should be organized as follows: + +1. Type and constant definitions + +2. Data members + +3. Constructors + +4. Other methods + +This convention follows the +[CUB library](https://nvlabs.github.io/cub/) +and is also described by +[Howard Hinnant](https://howardhinnant.github.io/classdecl.html). +It also approximates the usual ordering of chapters +in a typical Systems and Controls textbook. +That is, it + +1. identifies relevant constants, + +2. defines a state-space representation + of the dynamical system under study + (the class's data members), and then + +3. devotes the remaining "chapters" to defining + the system's dynamical behavior + (the class's methods). + +Here is an example class. + +```c++ +class A { +public: + // type definitions +protected: + // protected type definitions +private: + // private type definitions + +public: + // data members +protected: + // protected data members + // STRONGLY TO BE AVOIDED; + // please see C++ Core Guidelines +private: + // private data members + +public: + // methods +protected: + // protected methods +private: + // private methods +}; +``` + +#### Use scoped enums + +Use scoped enums (a C++11 feature) for enumerated types. +Use capital letters for the enumerated type name +and prefix `k` for enumerators like other constants. + +```c++ +enum class MatrixOperation { + kNone, + kTranspose, + kConjugate, + kHermitian +}; +``` + +#### Namespaces + +Namespaces are all lower case. +The top-level namespace is `cutlass::`. +The second nested namespace refers to +the general category of operation +performed by its members: e.g., `gemm::`. +The third nested namespace refers to +the operations' position in the conceptual hierarchy: +e.g., `device::`, `kernel::`, or `collective::`. 
+
+The bodies of namespace definitions should not be indented.
+Comments on the closing brace to indicate
+the namespace being closed are welcome.
+
+```c++
+namespace cutlass {
+namespace gemm {
+namespace kernel {
+
+struct AnotherGemmKernel {
+  // ... contents ...
+};
+
+} // namespace kernel
+} // namespace gemm
+} // namespace cutlass
+```
+
+#### File Names
+
+New files should be named using `snake_case`
+with extension `.hpp` for header files,
+`.cu` for CUDA sources,
+and `.cpp` for C++ host-only source files.
+
+Header files with extension `.h`
+are CUTLASS 2.x legacy headers.
+
+#### Macros
+
+Only use macros when the preprocessor
+is the only way to accomplish the task.
+Do not use macros for literal constants.
+Instead, if inside the body of a function,
+use `constexpr` values,
+and if at namespace scope, use
+[`inline constexpr` variables](https://en.cppreference.com/w/cpp/language/inline)
+(a C++17 feature).
+
+"Namespace" macros by starting them with the module name, e.g., `CUTLASS_`.
+Macros and ONLY MACROS use all capital letters with underscores between words.
+For example:
+
+```c++
+#define CUTLASS_MACROS_USE_ALL_CAPS inline __host__ __device__
+```
+
+Header files such as
+[cutlass/cutlass.h](../../include/cutlass/cutlass.h)
+and
+[cute/config.hpp](../../include/cute/config.hpp)
+offer macros for expressing compiler-dependent behavior.
+These include
+
+* replacements for `__device__` and/or `__host__`
+  annotations:
+
+  * `CUTLASS_HOST_DEVICE` or `CUTE_HOST_DEVICE`
+    for functions that run on the host and the device,
+
+  * `CUTLASS_DEVICE` or `CUTE_DEVICE`
+    for functions that run on the device only,
+
+  * `CUTE_HOST`
+    for functions that run on the host only, and
+
+  * `CUTE_HOST_RTC`
+    for functions that run on the host only,
+    but occur as unevaluated operands (of e.g., `decltype` or `sizeof`;
+    see C++ Standard, `[expr.context]` 1) in device code; and
+
+* annotations for loop unrolling:
+
+  * `CUTLASS_PRAGMA_UNROLL` or `CUTE_UNROLL`
+    for full unrolling of loops with constant trip counts, and
+
+  * `CUTLASS_PRAGMA_NO_UNROLL` or `CUTE_NO_UNROLL` to prevent unrolling.
+
+#### Guard all headers with `#pragma once`
+
+Use `#pragma once` to guard all headers.
+
+### CuTe Layout Comments
+
+* Right align CuTe layout comments at column 120.
+* If a layout comment is too long, do your best to align it.
+* If a layout comment is too long and there are many related tensors that the reader should read together, try to align the layout comments of the related tensors.
+
+```c++
+  Tensor my_tensor = make_tensor<float>(Layout<Shape<_2,_2>, Stride<_1,_2>>{});            // (2,2):(1,2)
+
+  // Related tensors
+  Tensor my_tensor1 = make_tensor(ThisIsAVeryComplicatedLayoutWithAVeryLongName);          // ((Mode0_0,Mode0_1,Mode0_2),Mode1,Mode2,Mode3)
+  Tensor my_tensor2_related = make_tensor(ThisIsAVeryComplicatedLayoutWithAVeryLongName);  // ((Mode0_0,Mode0_1,Mode0_2),Mode1,Mode2,Mode3)
+```
+
+### CUDA C++ style
+
+#### CUDA Built-in Variables
+
+Avoid direct access to CUDA built-in variables `threadIdx`, `blockIdx`, `blockDim`, and `gridDim` within
+CUTLASS components except in special circumstances.
+
+Using built-in global variables directly within reusable components necessitates that all components
+use them consistently, which may not be possible if CUTLASS components are used in other contexts.
+
+Instead, components should accept a linear ID identifying threads, warps, and threadblocks from calling
+code. The top-level kernel may then decide how to map threads, warps, and blocks to the problem it is
+solving.
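+
+As a minimal sketch of this convention (the component and kernel names below are illustrative,
+not part of the CUTLASS API), a reusable component can take the linear thread index and thread
+count as arguments, while only the top-level kernel touches the built-in variables:
+
+```c++
+#include "cutlass/cutlass.h"   // for CUTLASS_DEVICE
+
+// Hypothetical component: it never reads threadIdx or blockDim itself,
+// so the caller is free to choose any thread-to-work mapping.
+struct BlockTileCopy {
+  CUTLASS_DEVICE
+  void operator()(int thread_idx, int num_threads,
+                  float const* src, float* dst, int count) const {
+    for (int i = thread_idx; i < count; i += num_threads) {
+      dst[i] = src[i];
+    }
+  }
+};
+
+// Only the top-level kernel maps built-in variables to linear IDs.
+__global__ void block_copy_kernel(float const* src, float* dst, int count_per_block) {
+  int thread_idx   = static_cast<int>(threadIdx.x);
+  int num_threads  = static_cast<int>(blockDim.x);
+  int block_offset = static_cast<int>(blockIdx.x) * count_per_block;
+  BlockTileCopy{}(thread_idx, num_threads,
+                  src + block_offset, dst + block_offset, count_per_block);
+}
+```
+
+Because the component receives its ID from the caller, the same struct could be reused in a
+context with a different thread-to-data mapping without modification.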
+ +#### Use CUTLASS's and CuTe's fundamental types and operations + +Use the +[fundamental types and operations](fundamental_types.md) +defined in CUTLASS consistently. +This contributes to a framework of interoperable, consistent components. +It reduces code duplication, which reduces build and test times. +It also saves developer effort. + +CUTLASS's fundamental types and operations include + +* [Numeric types](fundamental_types.md#numeric-types) to represent numeric data in host and device code, and + +* [functional.h](fundamental_types.md#functional) to perform numeric operations in generic code. + +CUTLASS 3.0 uses CuTe components to represent data layouts and multidimensional arrays. +Please refer to the [CuTe Tutorial](./cute/00_quickstart.md) for details. +CuTe has replaced CUTLASS 2.x components such as +[Containers](fundamental_types.md#containers), +[Layouts](layout.md), and +[`TensorRef` and `TensorView`](layout.md#tensorref). + +## CUTLASS idioms + +### Detecting major mode + +Developers sometimes need to detect whether a tensor is MN-major or K-major. +(For definitions, see the [CuTe GEMM tutorial](./cute/0x_gemm_tutorial.md).) + +* _Correct_: `cutlass::detail::is_major<0, Stride>()` or +`cutlass::detail::is_k_major()` from `include/cutlass/gemm/gemm.h` + +* _Incorrect_: `get<0>(stride) == 1` + +The second point is incorrect because it assumes that the mode +is a single integer, not a multimode. +This means that the code will fail to compile for tensor contractions. +For example, suppose that a tensor A +has shape `((X, Y), K)` and stride `((1, X), X*Y)`. +`get<0>(stride)` is the tuple `(1, X)`, not a single integer. +However, A is certainly M major if interpreted as a matrix. + +# Copyright + +Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +SPDX-License-Identifier: BSD-3-Clause + +``` + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+``` diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/quickstart.md b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/quickstart.md new file mode 100644 index 0000000000000000000000000000000000000000..c43882cc4d554ed5718c6506a6101192c4621e2b --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/quickstart.md @@ -0,0 +1,685 @@ +![ALT](/media/images/gemm-hierarchy-with-epilogue-no-labels.png "CUTLASS Quick Start Guide") + +[README](/README.md#documentation) > **Quick Start** + +# Quickstart + +## Prerequisites + +CUTLASS requires: +- NVIDIA CUDA Toolkit (11.4 or later required, [12.0](https://developer.nvidia.com/cuda-toolkit) recommended) +- CMake 3.18+ +- host compiler supporting C++17 or greater (minimum g++ 7.5.0) +- Python 3.6+ + +CUTLASS may be optionally compiled and linked with +- cuBLAS +- cuDNN v7.6 or later + +## Initial build steps + +Construct a build directory and run CMake. +```bash +$ export CUDACXX=${CUDA_INSTALL_PATH}/bin/nvcc + +$ mkdir build && cd build + +$ cmake .. -DCUTLASS_NVCC_ARCHS=90a # compiles for NVIDIA Hopper GPU architecture +``` + +If your goal is strictly to build only the CUTLASS Profiler and to minimize compilation time, we suggest +executing the following CMake command in an empty `build/` directory. +```bash +$ cmake .. -DCUTLASS_NVCC_ARCHS=90a -DCUTLASS_ENABLE_TESTS=OFF -DCUTLASS_UNITY_BUILD_ENABLED=ON +``` + +This reduces overall compilation time by excluding unit tests and enabling the unit build. + +You may reduce build times by compiling only certain operations by setting the `CUTLASS_LIBRARY_OPERATIONS` flag as shown below, +executed from an empty `build/` directory. This only compiles 2-D convolution kernels. + +```bash +$ cmake .. -DCUTLASS_NVCC_ARCHS=90a -DCUTLASS_LIBRARY_OPERATIONS=conv2d +``` + +You may also filter kernels by name by supplying a filter string with flag `CUTLASS_LIBRARY_KERNELS`. For example the below command selects only CUTLASS-3 kernels. + +```bash +$ cmake .. -DCUTLASS_NVCC_ARCHS=90a -DCUTLASS_LIBRARY_KERNELS=cutlass3x* +``` +See more examples on selectively compiling CUTLASS GEMM and convolution kernels [here](quickstart.md#example-cmake-commands). + +You may explicitly exclude cuBLAS and cuDNN as dependencies with the following CMake flags. +- `-DCUTLASS_ENABLE_CUBLAS=OFF` +- `-DCUTLASS_ENABLE_CUDNN=OFF` + + +## Build and run the CUTLASS Profiler + +From the `build/` directory created above, compile the the CUTLASS Profiler. +```bash +$ make cutlass_profiler -j12 +``` + +Then execute the CUTLASS Profiler computing GEMM, execute the following command. +```bash +$ ./tools/profiler/cutlass_profiler --kernels=sgemm --m=4352 --n=4096 --k=4096 + +============================= + Problem ID: 1 + + Provider: CUTLASS + Operation: cutlass_simt_sgemm_128x128_nn + + Disposition: Passed + Status: Success + + Arguments: --m=4352 --n=4096 --k=4096 --A=f32:column --B=f32:column --C=f32:column --alpha=1 --beta=0 \ + --split_k_slices=1 --batch_count=1 --op_class=simt --accum=f32 --cta_m=128 --cta_n=128 --cta_k=8 \ + --stages=2 --warps_m=2 --warps_n=2 --warps_k=1 --inst_m=1 --inst_n=1 --inst_k=1 --min_cc=50 \ + --max_cc=1024 + + Bytes: 52428800 bytes + FLOPs: 146064539648 flops + + Runtime: 10.5424 ms + Memory: 4.63158 GiB/s + + Math: 13854.9 GFLOP/s +``` + +To execute the CUTLASS Profiler for convolution, run the following example. 
+```bash +$ ./tools/profiler/cutlass_profiler --kernels=s1688fprop --n=8 --h=224 --w=224 --c=128 --k=128 --r=3 --s=3 --pad_h=1 --pad_w=1 +``` + +To execute all CUTLASS 2-D convolution operators, execute the following. +```bash +$ ./tools/profiler/cutlass_profiler --operation=conv2d --n=8 --h=224 --w=224 --c=128 --k=128 --r=3 --s=3 + + +============================= + Problem ID: 1 + + Provider: CUTLASS + OperationKind: conv2d + Operation: cutlass_simt_sfprop_optimized_128x128_8x2_nhwc + + Status: Success + Verification: ON + Disposition: Passed + +reference_device: Passed + + Arguments: --conv_kind=fprop --n=8 --h=224 --w=224 --c=128 --k=128 --r=3 --s=3 --p=224 --q=224 --pad_h=1 --pad_w=1 \ + --stride_h=1 --stride_w=1 --dilation_h=1 --dilation_w=1 --Activation=f32:nhwc --Filter=f32:nhwc --Output=f32:nhwc \ + --conv_mode=cross --iterator_algorithm=optimized --alpha=1 --beta=0 --split_k_mode=serial --split_k_slices=1 \ + --eq_gemm_provider=none --op_class=simt --accum=f32 --cta_m=128 --cta_n=128 --cta_k=8 --stages=2 --warps_m=4 \ + --warps_n=2 --warps_k=1 --inst_m=1 --inst_n=1 --inst_k=1 --min_cc=50 --max_cc=1024 + + Bytes: 2055798784 bytes + FLOPs: 118482796544 flops + + Runtime: 8.13237 ms + Memory: 235.431 GiB/s + + Math: 14569.3 GFLOP/s + +``` + +See [documentation for the CUTLASS Profiler](profiler.md) for more details. + +## Build and run CUTLASS Unit Tests + +From the `build/` directory created above, simply build the target `test_unit` to compile and run +all unit tests. + +```bash +$ make test_unit -j +... +... +... +[----------] Global test environment tear-down +[==========] 946 tests from 57 test cases ran. (10812 ms total) +[ PASSED ] 946 tests. +$ +``` +The exact number of tests run is subject to change as we add more functionality. + +No tests should fail. Unit tests automatically construct the appropriate runtime filters +to avoid executing on architectures that do not support all features under test. + +The unit tests are arranged hierarchically mirroring the CUTLASS Template Library. This enables +parallelism in building and running tests as well as reducing compilation times when a specific +set of tests are desired. + +For example, the following executes strictly the warp-level GEMM tests. +```bash +$ make test_unit_gemm_warp -j +... +... +[----------] 3 tests from SM75_warp_gemm_tensor_op_congruous_f16 +[ RUN ] SM75_warp_gemm_tensor_op_congruous_f16.128x128x8_32x128x8_16x8x8 +[ OK ] SM75_warp_gemm_tensor_op_congruous_f16.128x128x8_32x128x8_16x8x8 (0 ms) +[ RUN ] SM75_warp_gemm_tensor_op_congruous_f16.128x128x32_64x64x32_16x8x8 +[ OK ] SM75_warp_gemm_tensor_op_congruous_f16.128x128x32_64x64x32_16x8x8 (2 ms) +[ RUN ] SM75_warp_gemm_tensor_op_congruous_f16.128x128x32_32x32x32_16x8x8 +[ OK ] SM75_warp_gemm_tensor_op_congruous_f16.128x128x32_32x32x32_16x8x8 (1 ms) +[----------] 3 tests from SM75_warp_gemm_tensor_op_congruous_f16 (3 ms total) +... +... +[----------] Global test environment tear-down +[==========] 104 tests from 32 test cases ran. (294 ms total) +[ PASSED ] 104 tests. +[100%] Built target test_unit_gemm_warp +``` + +## Building for Multiple Architectures + +To minimize compilation time, specific GPU architectures can be enabled via the CMake command, +selected by [CUDA Compute Capability.](https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#compute-capabilities) + +**NVIDIA Ampere Architecture.** +```bash +$ cmake .. -DCUTLASS_NVCC_ARCHS=90a # compiles for NVIDIA Hopper GPU architecture +``` + +```bash +$ cmake .. 
-DCUTLASS_NVCC_ARCHS=80 # compiles for NVIDIA Ampere GPU architecture +``` + +**NVIDIA Turing Architecture.** +```bash +$ cmake .. -DCUTLASS_NVCC_ARCHS=75 # compiles for NVIDIA Turing GPU architecture +``` + +**NVIDIA Volta Architecture.** +```bash +$ cmake .. -DCUTLASS_NVCC_ARCHS=70 # compiles for NVIDIA Volta GPU architecture +``` + +**NVIDIA Pascal Architecture.** +```bash +$ cmake .. -DCUTLASS_NVCC_ARCHS="60;61" # compiles for NVIDIA Pascal GPU architecture +``` + +**NVIDIA Maxwell Architecture.** +```bash +$ cmake .. -DCUTLASS_NVCC_ARCHS="50;53" # compiles for NVIDIA Maxwell GPU architecture +``` + +## Using CUTLASS within other applications + +Applications should list [`/include`](/include) within their include paths. They must be +compiled as C++17 or greater. + +**Example:** print the contents of a variable storing half-precision data. +```c++ +#include +#include +#include +#include + +int main() { + + cutlass::half_t x = 2.25_hf; + + std::cout << x << std::endl; + + return 0; +} +``` + +## Launching a GEMM kernel in CUDA + +**Example:** launch a mixed-precision GEMM targeting Turing Tensor Cores. + +_Note, this example uses CUTLASS Utilities. Be sure `tools/util/include` is listed as an include path._ +```c++ +#include +#include + +#include + +int main() { + + // Define the GEMM operation + using Gemm = cutlass::gemm::device::Gemm< + cutlass::half_t, // ElementA + cutlass::layout::ColumnMajor, // LayoutA + cutlass::half_t, // ElementB + cutlass::layout::ColumnMajor, // LayoutB + cutlass::half_t, // ElementOutput + cutlass::layout::ColumnMajor, // LayoutOutput + float, // ElementAccumulator + cutlass::arch::OpClassTensorOp, // tag indicating Tensor Cores + cutlass::arch::Sm75 // tag indicating target GPU compute architecture + >; + + Gemm gemm_op; + cutlass::Status status; + + // + // Define the problem size + // + int M = 512; + int N = 256; + int K = 128; + + float alpha = 1.25f; + float beta = -1.25f; + + // + // Allocate device memory + // + + cutlass::HostTensor A({M, K}); + cutlass::HostTensor B({K, N}); + cutlass::HostTensor C({M, N}); + + cutlass::half_t const *ptrA = A.device_data(); + cutlass::half_t const *ptrB = B.device_data(); + cutlass::half_t const *ptrC = C.device_data(); + cutlass::half_t *ptrD = C.device_data(); + + int lda = A.device_ref().stride(0); + int ldb = B.device_ref().stride(0); + int ldc = C.device_ref().stride(0); + int ldd = C.device_ref().stride(0); + // + // Launch GEMM on the device + // + + status = gemm_op({ + {M, N, K}, + {ptrA, lda}, // TensorRef to A device tensor + {ptrB, ldb}, // TensorRef to B device tensor + {ptrC, ldc}, // TensorRef to C device tensor + {ptrD, ldd}, // TensorRef to D device tensor - may be the same as C + {alpha, beta} // epilogue operation arguments + }); + + if (status != cutlass::Status::kSuccess) { + return -1; + } + + return 0; +} +``` + +Note, the above could be simplified as follows using helper methods defined in `HostTensor`. +```c++ + cutlass::HostTensor A({M, K}); + cutlass::HostTensor B({K, N}); + cutlass::HostTensor C({M, N}); + + // + // Use the TensorRef returned by HostTensor::device_ref(). 
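+  // A TensorRef bundles the device pointer together with the tensor's layout
+  // (and therefore its stride), so the separate pointer and leading-dimension
+  // arguments used in the previous example are no longer needed.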
+ // + + status = gemm_op({ + {M, N, K}, + A.device_ref(), // TensorRef to A device tensor + B.device_ref(), // TensorRef to B device tensor + C.device_ref(), // TensorRef to C device tensor + C.device_ref(), // TensorRef to D device tensor - may be the same as C + {alpha, beta} // epilogue operation arguments + }); +``` + +## Launching a GEMM kernel using CUTLASS 3.0 or newer + +**Example:** launch a mixed-precision GEMM targeting Hopper Tensor Cores. + +```c++ +#include "cutlass/cutlass.h" +#include "cutlass/epilogue/collective/default_epilogue.hpp" +#include "cutlass/epilogue/thread/linear_combination.h" +#include "cutlass/gemm/collective/collective_builder.hpp" +#include "cutlass/gemm/device/gemm_universal_adapter.h" +#include "cutlass/gemm/kernel/gemm_universal.hpp" + +#include "cutlass/util/host_tensor.h" +#include "cutlass/util/packed_stride.hpp" + +using namespace cute; + +int main(int argc, char const **args) { + + // A matrix configuration + using ElementA = cutlass::half_t; // Element type for A matrix operand + using LayoutA = cutlass::layout::RowMajor; // Layout type for A matrix operand + constexpr int AlignmentA = 128 / cutlass::sizeof_bits::value; // Memory access granularity/alignment of A matrix in units of elements (up to 16 bytes) + + // B matrix configuration + using ElementB = cutlass::half_t; // Element type for B matrix operand + using LayoutB = cutlass::layout::ColumnMajor; // Layout type for B matrix operand + constexpr int AlignmentB = 128 / cutlass::sizeof_bits::value; // Memory access granularity/alignment of B matrix in units of elements (up to 16 bytes) + + // C/D matrix configuration + using ElementC = cutlass::half_t; // Element type for C and D matrix operands + using LayoutC = cutlass::layout::ColumnMajor; // Layout type for C and D matrix operands + + // Core kernel configurations + using ElementAccumulator = float; // Element type for internal accumulation + using ArchTag = cutlass::arch::Sm90; // Tag indicating the minimum SM that supports the intended feature + using OperatorClass = cutlass::arch::OpClassTensorOp; // Operator class tag + using TilesShape = Shape<_128,_128,_64>; // Threadblock-level tile size + using ClusterShape = Shape<_1,_2,_1>; // Shape of the threadblocks in a cluster + using StageCountType = cutlass::gemm::collective::StageCountAuto; // Stage count maximized based on the tile size + using KernelSchedule = cutlass::gemm::collective::KernelScheduleAuto; // Kernel to launch based on the default setting in the Collective Builder + + using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< + ArchTag, OperatorClass, + ElementA, LayoutA, AlignmentA, + ElementB, LayoutB, AlignmentB, + ElementAccumulator, + TilesShape, ClusterShape, + cutlass::gemm::collective::StageCountAuto, + cutlass::gemm::collective::KernelScheduleAuto + >::CollectiveOp; + + using CollectiveEpilogue = cutlass::epilogue::collective::DefaultEpilogue< + cutlass::gemm::TagToStrideC_t, + cutlass::gemm::TagToStrideC_t, + cutlass::epilogue::thread::LinearCombination>; + + using GemmKernel = cutlass::gemm::kernel::GemmUniversal< + Shape, // Indicates ProblemShape + CollectiveMainloop, + CollectiveEpilogue + >; + + using Gemm = cutlass::gemm::device::GemmUniversalAdapter; + + Gemm gemm_op; + cutlass::Status status; + + // + // Define the problem size + // + + int M = 512; + int N = 256; + int K = 128; + + float alpha = 1.25f; + float beta = -1.25f; + + // + // Allocate device memory + // + + cutlass::DeviceAllocation block_A; + 
cutlass::DeviceAllocation block_B; + cutlass::DeviceAllocation block_C; + cutlass::DeviceAllocation block_D; + + using StrideA = typename Gemm::GemmKernel::StrideA; + using StrideB = typename Gemm::GemmKernel::StrideB; + using StrideC = typename Gemm::GemmKernel::StrideC; + using StrideD = typename Gemm::GemmKernel::StrideD; + + StrideA stride_A; + StrideB stride_B; + StrideC stride_C; + StrideD stride_D; + + stride_A = cutlass::make_cute_packed_stride(StrideA{}, cute::make_shape(M, K, Int<1>{})); + stride_B = cutlass::make_cute_packed_stride(StrideB{}, cute::make_shape(N, K, Int<1>{})); + stride_C = cutlass::make_cute_packed_stride(StrideC{}, cute::make_shape(M, N, Int<1>{})); + stride_D = cutlass::make_cute_packed_stride(StrideD{}, cute::make_shape(M, N, Int<1>{})); + + block_A.reset(M * K); + block_B.reset(K * N); + block_C.reset(M * N); + block_D.reset(M * N); + + // + // Launch GEMM on the device + // + + status = gemm_op({ + cutlass::gemm::GemmUniversalMode::kGemm, + {M, N, K}, + block_A.get(), + stride_A, + block_B.get(), + stride_B, + {block_C.get(), stride_C, block_D.get(), stride_D, {alpha, beta}} + }); + + if (status != cutlass::Status::kSuccess) { + return -1; + } + + return 0; +} +``` + +# CUTLASS Library + +The [CUTLASS Library](/tools/library) defines an API for managing and executing collections of compiled +kernel instances and launching them from host code without template instantiations in client code. + +The host-side launch API is designed to be analogous to BLAS implementations for convenience, though its +kernel selection procedure is intended only to be functionally sufficient. It may not launch the +optimal tile size for a given problem. It chooses the first available kernel whose data types, +layouts, and alignment constraints satisfy the given problem. Kernel instances and a data structure +describing them are completely available to client applications which may choose to implement their +own selection logic. + +[cuBLAS](https://developer.nvidia.com/cublas) offers the best performance and functional coverage +for dense matrix computations on NVIDIA GPUs. + +The CUTLASS Library is used by the CUTLASS Profiler to manage kernel instances, and it is also used +by several SDK examples. + +* [10_planar_complex](/examples/10_planar_complex/planar_complex.cu) +* [11_planar_complex_array](/examples/11_planar_complex_array/planar_complex_array.cu) + +The CUTLASS Library defines enumerated types describing numeric data types, matrix and tensor +layouts, math operation classes, complex transformations, and more. + +Client applications should specify [`tools/library/include`](/tools/library/include) in their +include paths and link against libcutlas_lib.so. + +The CUTLASS SDK example [10_planar_complex](/examples/10_planar_complex/CMakeLists.txt) specifies +its dependency on the CUTLASS Library with the following CMake command. +``` +target_link_libraries( + 10_planar_complex + PRIVATE + cutlass_lib + cutlass_tools_util_includes +) +``` + +A sample kernel launch from host-side C++ is shown as follows. 
+
+```c++
+#include "cutlass/library/library.h"
+#include "cutlass/library/handle.h"
+#include "cutlass/util/host_tensor.h"
+
+int main() {
+
+  //
+  // Define the problem size
+  //
+  int M = 512;
+  int N = 256;
+  int K = 128;
+
+  float alpha = 1.25f;
+  float beta = -1.25f;
+
+  //
+  // Allocate device memory
+  //
+
+  cutlass::HostTensor<float, cutlass::layout::ColumnMajor> A({M, K});
+  cutlass::HostTensor<float, cutlass::layout::ColumnMajor> B({K, N});
+  cutlass::HostTensor<float, cutlass::layout::ColumnMajor> C({M, N});
+
+  float const *ptrA = A.device_data();
+  float const *ptrB = B.device_data();
+  float const *ptrC = C.device_data();
+  float       *ptrD = C.device_data();
+
+  int lda = A.device_ref().stride(0);
+  int ldb = B.device_ref().stride(0);
+  int ldc = C.device_ref().stride(0);
+  int ldd = C.device_ref().stride(0);
+
+  //
+  // CUTLASS Library call to execute device GEMM
+  //
+
+  cutlass::library::Handle handle;
+
+  //
+  // Launch GEMM on CUDA device.
+  //
+
+  cutlass::Status status = handle.gemm(
+    M,
+    N,
+    K,
+
+    cutlass::library::NumericTypeID::kF32,          // data type of internal accumulation
+    cutlass::library::NumericTypeID::kF32,          // data type of alpha/beta scalars
+
+    &alpha,                                         // pointer to alpha scalar
+
+    cutlass::library::NumericTypeID::kF32,          // data type of A matrix
+    cutlass::library::LayoutTypeID::kColumnMajor,   // layout of A matrix
+    ptrA,                                           // pointer to A matrix in device memory
+    lda,                                            // leading dimension of A matrix
+
+    cutlass::library::NumericTypeID::kF32,          // data type of B matrix
+    cutlass::library::LayoutTypeID::kColumnMajor,   // layout of B matrix
+    ptrB,                                           // pointer to B matrix in device memory
+    ldb,                                            // leading dimension of B matrix
+
+    &beta,                                          // pointer to beta scalar
+
+    cutlass::library::NumericTypeID::kF32,          // data type of C and D matrices
+
+    ptrC,                                           // pointer to C matrix in device memory
+    ldc,                                            // leading dimension of C matrix
+
+    ptrD,                                           // pointer to D matrix in device memory
+    ldd                                             // leading dimension of D matrix
+  );
+
+  if (status != cutlass::Status::kSuccess) {
+    return -1;
+  }
+
+  return 0;
+}
+```
+
+# Example CMake Commands
+
+To instantiate all operations supporting all tile sizes, data types, and alignment constraints, specify
+`-DCUTLASS_LIBRARY_KERNELS=all` when running `cmake`.
+```bash
+$ cmake .. -DCUTLASS_NVCC_ARCHS='70;75;80' -DCUTLASS_LIBRARY_KERNELS=all
+```
+The above command line generates about twenty thousand kernels targeting NVIDIA Ampere, Turing, and Volta architectures.
+Compiling thousands of kernels for three different architectures is time-consuming. It also produces a large binary and,
+on some platforms, can cause the linker to fail when building the library.
+
+Enabling the "unity build" instantiates multiple kernel instances in each compilation unit, thereby reducing binary size
+and avoiding linker limitations on some platforms.
+```bash
+$ cmake .. -DCUTLASS_NVCC_ARCHS="70;75;80" -DCUTLASS_LIBRARY_KERNELS=all -DCUTLASS_UNITY_BUILD_ENABLED=ON
+```
+
+It is advisable to compile CUTLASS kernels only for the NVIDIA architectures one plans to run on. Furthermore, kernels
+can be selectively included in the CUTLASS Library by specifying filter strings and wildcard characters when executing CMake.
+
+Several examples are defined below for convenience. They may be combined as a comma-delimited list.
+Compiling only the desired kernels reduces compilation time.
+
+
+## GEMM CMake Examples
+**Example.** All GEMM kernels targeting NVIDIA Ampere Tensor Cores.
+```bash
+$ cmake .. -DCUTLASS_NVCC_ARCHS=80 -DCUTLASS_LIBRARY_KERNELS=tensorop*gemm
+```
+
+**Example.** All GEMM kernels targeting NVIDIA Turing Tensor Cores.
+```bash
+$ cmake .. 
-DCUTLASS_NVCC_ARCHS=75 -DCUTLASS_LIBRARY_KERNELS=tensorop*gemm +``` + +**Example.** All GEMM kernels with FP32 accumulation targeting NVIDIA Ampere, Turing, and Volta architectures. +```bash +$ cmake .. -DCUTLASS_NVCC_ARCHS="70;75;80" -DCUTLASS_LIBRARY_KERNELS=s*gemm +``` + +**Example.** All kernels which expect A and B to be column-major or row-major targeting NVIDIA Ampere, Turing, and Volta architectures. +```bash +$ cmake .. -DCUTLASS_NVCC_ARCHS="70;75;80" -DCUTLASS_LIBRARY_KERNELS=gemm*nn,gemm*tt +``` + +**Example.** All planar complex GEMM variants targeting NVIDIA Ampere, Turing, and Volta architectures. +```bash +$ cmake .. -DCUTLASS_NVCC_ARCHS="70;75;80" -DCUTLASS_LIBRARY_KERNELS=planar_complex +``` + +## Convolution CMake Examples +**Example.** All convolution kernels targeting NVIDIA Ampere's 16816 Tensor Core operation +```bash +$ cmake .. -DCUTLASS_NVCC_ARCHS='80' -DCUTLASS_LIBRARY_KERNELS=s16816fprop,s16816dgrad,s16816wgrad +``` + +**Example.** All forward propagation (fprop) convolution kernels targeting CUDA Cores for multiple NVIDIA architectures +```bash +$ cmake .. -DCUTLASS_NVCC_ARCHS='50;60;61;70;75;80' -DCUTLASS_LIBRARY_KERNELS=sfprop +``` + +**Example.** All forward propagation (fprop) convolution kernels with FP32 accumulation and FP16 input targeting NVIDIA Ampere's 16816 Tensor Core operation +```bash +$ cmake .. -DCUTLASS_NVCC_ARCHS='80' -DCUTLASS_LIBRARY_KERNELS=s16816fprop_*_f16 +``` + +**Example.** All backward weight gradient (wgrad) convolution kernels with FP32 accumulation, FP16 input, and optimized global memory iterator +targeting NVIDIA Ampere, Turing, and Volta Tensor Core operations +```bash +$ cmake .. -DCUTLASS_NVCC_ARCHS='70;75;80' -DCUTLASS_LIBRARY_KERNELS=tensorop*s*wgrad_optimized_f16 +``` + +# Copyright + +Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +SPDX-License-Identifier: BSD-3-Clause + +``` + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+``` diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/terminology.md b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/terminology.md new file mode 100644 index 0000000000000000000000000000000000000000..e0f04790a3c9a25708b69e2c8f8dc9d6dfe25d26 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/terminology.md @@ -0,0 +1,113 @@ +![ALT](/media/images/gemm-hierarchy-with-epilogue-no-labels.png "CUTLASS Terminology") + +[README](/README.md#documentation) > **Terminology** + +# CUTLASS Terminology + +**cute::Layout**: A `cute::Layout` vocabulary type composed of the hierarchical `cute::Shape` and `cute::Stride` +tuples that is used throughout CUTLASS 3.0 to represent and manipulate thread and data layouts. More details are included in the [CuTe specific tensor type documentation](/media/docs/cute/03_tensor.md). + +**cute::Tensor**: A pointer backed by a `cute::Layout` used to represent a tensor. More details are included in the [CuTe specific tensor type documentation](/media/docs/cute/03_tensor.md). + +**Capacity**: (scalar) physical number of elements in memory required to store a multidimensional object; expressed as the type's LongIndex type + - example: the capacity of a column-major matrix is `lda * N` + +**Element**: data type describing one item in a multidimensional tensor, array, or matrix + +**Extent**: (vector-valued quantity) the logical size of each dimension of a multidimensional index space. Consistent with the [C++ Standard Library](https://en.cppreference.com/w/cpp/types/extent). + - `Coord extent()` + - `Index extent(int dim)` + +**Fragment**: a register-backed array of elements used to store a thread's part of a tile + +**Index**: signed integer representing quantities aligned with a logical dimension + +**Layout**: functor mapping logical coordinates of a tensor to linear offset (as LongIndex); owns stride vectors, if any. + +**LongIndex**: signed integer representing offsets in memory; typically wider than Index type + +**Numeric Type**: a CUTLASS data type used to represent real-valued quantities; is trivially copyable. + +**Pitch Linear**: linear memory allocation obtained from a user-defined 2-D size, which specifies the +contiguous and strided dimensions of a tile. + +**Planar Complex**: representation of complex tensors as two real-valued tensors, with real elements in one part and imaginary elements in another part of identical layout, separated by an offset + +**Policy**: additional details extending the interface of a template guiding internal implementation; + typically used to target specific design points known to be efficient + +**Rank**: number of dimensions in a multidimensional index space, array, tensor, or matrix. Consistent with + [C++ Standard Library](https://en.cppreference.com/w/cpp/types/rank) + +**Register**: in device code, registers are the most efficient storage for statically sized arrays of elements. + Arrays may be expected to be stored in registers if all accesses are made via constexpr indices or within + fully unrolled loops. 
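+ - example: the sketch below (illustrative only, not part of the definition) keeps `frag` in registers because the loop is fully unrolled and every index is a compile-time constant
+
+```c++
+#include "cutlass/cutlass.h"
+#include "cutlass/array.h"
+
+// Sums an 8-element fragment; the unrolled loop makes all indices constexpr,
+// allowing the compiler to keep the array entirely in registers.
+CUTLASS_DEVICE
+float reduce_fragment(cutlass::Array<float, 8> const &frag) {
+  float sum = 0;
+  CUTLASS_PRAGMA_UNROLL
+  for (int i = 0; i < 8; ++i) {
+    sum += frag[i];
+  }
+  return sum;
+}
+```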
+ +**Residue**: partial tile or matrix computation which may require special accommodation for functional correctness or performance + +**Size**: (scalar) number of logical elements in a tensor; equal to the product of each member of `extent()` + - `LongIndex size()` + +`sizeof_bits::value` - template pattern returning the size of a numeric type or array in units of bits + +**Storage**: when appropriate, refers to some alternative type used to store a packed collection of elements; + may be used to handle bit-level packing or make types safe for use in unions + +**TensorRef**: contains base pointer and _Layout_ object for referencing infinitely-sized tensor object + +**TensorView**: contains _TensorRef_ and extent of a finite mathematical object + +**Tile**: partitions of a tensor that have constant extents and layout known at compile time + +**Trait**: characteristics of a fully-specialized type, typically used in metaprogramming reflection + +**View**: an object containing references to a data structure that it does not own; typically, construction of views is lightweight + +**Warp**: a collection of hardware threads executing in lock-step; warp-level operations typically rely on cooperation among the threads within the warp + +`AlignedBuffer`: statically sized array type; union-safe, no construction guarantee for elements + +`Array`: container for holding numeric types - handles bit packing for small numeric types (e.g. int4_t, uint4_t, bin1_t) + `sizeof(Array)` - gives expected value in units of bytes with minimum storage of `1 B`: (sizeof_bits::value * N) / 8 + +**Operator**: an object performing a computation on matrix or tensor objects. May be further refined by scope within the execution model hierarchy. Deprecated starting CUTLASS 3.0, +replaced by [MMA and Copy atoms from CuTe](/media/docs/cute/0t_mma_atom.md). + +**Tile Iterator**: abstraction for accessing and traversing a sequence of tiles in a tensor; CUTLASS specifies + [formal concepts for tile iterators](tile_iterator_concept.md). Deprecated starting CUTLASS 3.0. + Replaced by `cute::Layout` in equivalent usage scenarios to represent data tensors. + +**Thread Map**: abstraction for defining how threads are mapped to a given tile. Deprecated starting CUTLASS 3.0. + Replaced by `cute::Layout` in equivalent usage scenarios to represent thread tensors. + +# Copyright + +Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +SPDX-License-Identifier: BSD-3-Clause + +``` + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +``` diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/tile_iterator_concept.md b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/tile_iterator_concept.md new file mode 100644 index 0000000000000000000000000000000000000000..efff36131dffb54d9b60ffd2c5ae859f99b5c79e --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/tile_iterator_concept.md @@ -0,0 +1,504 @@ +![ALT](/media/images/gemm-hierarchy-with-epilogue-no-labels.png "CUTLASS Tile Iterator Concepts") + +[README](/README.md#documentation) > **Tile Iterator Concepts** + +# Tile Iterator Concepts + +Note: CUTLASS 3.0 deprecates all tile access iterators in favour of CuTe's single +vocabulary type `cute::Tensor`, which is parameterized on `cute::Layout`. +`cute::Tensor`s can therefore be manipulated with the same layout algebra as all CuTe layouts. +This removes the need for bespoke types that encapsulate iterator properties. +The following text thus only applies to legacy CUTLASS 2.x API and related types. + +CUTLASS 2.x implements generic algorithms on tiles of matrix or tensors of constant size. These may +be considered as partitions of tensors of infinite size, with a range of partitions accessible +by _tile iterators_. + +Various data structures may make operations such as random access to tiles inexpensive, +while data structures may not offer random access at all. For example, iterating over a linked +list of matrices requires sequential traversal. Algorithms implemented in terms of sequences of tiles +should require only the minimum set of operators be defined for tile iterators. + +This document describes a set of C++ concepts which may be used to define tile iterators used +by CUTLASS algorithms. ("Concept" here does not refer to a C++20 concept that uses the `concept` keyword. +Rather, it refers to a set of requirements on a type.) +Each concept specifies members and type definitions that a tile iterator +must implement. Frequently, a tile iterator implements several concepts, and its members are +the union of the members from each individual concept. These definitions were inspired by +[Boost "New style" iterator concepts](https://www.boost.org/doc/libs/1_40_0/libs/iterator/doc/new-iter-concepts.html). + +The set of all possible combinations of these concepts is quite large, however most tile iterator +templates can be described by one of several combinations. The section +Frequently Used Tile Iterator Concepts describes several common interfaces used throughout CUTLASS. + +## Definitions + +**_Base Tile Iterator Concept_.** All tile iterators must describe an _Element_ type as well as a _Shape_. +```c++ +/// Base concept for all tile iterators +struct TileIteratorConcept { + using Element; ///< Element type composing tile (concept: numeric type or Array<>) + using Shape; ///< Shape type describing extent of tile. The shape concept depends + /// on iterator implementation. 
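+                     ///    (for example, a compile-time cutlass::MatrixShape<Rows, Columns>
+                     ///     or cutlass::PitchLinearShape<Contiguous, Strided>)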
+}; +``` + +**_Contiguous Memory Tile Iterator Concept_.** Iterators over tiles stored arbitrarily within +a continuous block of data in memory. Linear offset in units of _Element_ may be added to +internally held pointers to 'move' the iterator in memory. + +```c++ +/// Tile iterator over partitions of a tensor in contiguous memory which may be referenced via a +/// TensorRef object. +struct ContiguousMemoryTileIterator : public TileIteratorConcept { + + using Index; ///< index type used to add pointer offsets + + /// Adds a linear offset in units of Element to internal pointer(s) into tensor + CUTLASS_DEVICE + void add_pointer_offset(Index pointer_offset); +}; +``` + +**_Readable Tile Iterator Concept_.** Iterators that may be read from define a `Fragment` type holding +each thread's part of the data to be loaded. An explicit `load()` method reads the tile from memory, +and places each thread's part in its `Fragment` object. + +```c++ +/// Tile iterator capable of loading tiles from memory into fragments +struct ReadableTileIteratorConcept { + + using Fragment; ///< fragment object derived from cutlass::Array + + CUTLASS_DEVICE + void load(Fragment &frag); ///< loads a fragment from memory +}; +``` + +**_Readable Contiguous Tile Iterator Concept_.** Iterators reading from contiguous memory +support an optional pointer offset that is added to any internally managed pointers before +performing the load. This provides a convenient method to fold an offset in with load +operations. + +```c++ +/// Union of the following tile iterator concepts: +/// +/// - ReadableTileIteratorConcept +/// - ContiguousMemoryTileIterator +/// +struct ReadableContiguousTileIteratorConcept : + public ReadableTileIteratorConcept, + public ContiguousMemoryTileIterator { + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + Fragment &frag, ///< fragment to load from the tensor + Index pointer_offset); ///< loads a tile with a linear offset +}; +``` + +**_Writeable Tile Iterator Concept_.** Iterators that may write to memory define a `Fragment` type holding +each thread's part of the data to be written. An explicit `store()` method writes the tile to memory. + +```c++ +/// Tile iterator capable of storing tiles from memory +struct WriteableTileIteratorConcept { + + using Fragment; ///< fragment object derived from cutlass::Array + + /// Stores a fragment to memory + CUTLASS_DEVICE + void store(Fragment const &frag); ///< stores a fragment to memory +}; +``` + +**_Writeable Contiguous Tile Iterator Concept_.** Iterators writing to contiguous memory +support an optional pointer offset that is added to any internally managed pointers before +performing the store operation. This provides a convenient method to fold an offset into the +store. +```c++ +/// Union of the following tile iterator concepts: +/// +/// - WriteableTileIteratorConcept +/// - ContiguousMemoryTileIterator +/// +struct WriteableContiguousTileIteratorConcept : + public WriteableTileIteratorConcept, + public ContiguousMemoryTileIterator { + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void store_with_pointer_offset( + Fragment const &frag, ///< fragment to store to the tensor + Index pointer_offset); ///< stores a tile with a linear offset +}; +``` + +**_Forward Tile Iterator Concept_.** This concept offers traversal "forward" by one tile in +a pre-defined sequence. 
Often, this sequence is relevant to the context in which the iterator +was defined, such as along the _K_ dimension of a GEMM operation. Equality operators are defined +to determine whether two iterators point to the same tile. +```c++ +/// Tile iterator that may be incremented along a traversal sequence. +struct ForwardTileIteratorConcept { + + CUTLASS_DEVICE bool operator==(TileIterator const &it); ///< true if iterators point to same tile, false if otherwise + CUTLASS_DEVICE bool operator!=(TileIterator const &it); ///< false if iterators point to same tile, true if otherwise + + CUTLASS_DEVICE ForwardTileIteratorConcept & operator++(); ///< pre-increment - advance to next tile in sequence + CUTLASS_DEVICE ForwardTileIteratorConcept operator++(int); ///< post-increment - advance to next tile in sequence +}; +``` + +**_Bidirectional Tile Iterator Concept_.** This concept permits traversal both forward and backward. +```c++ +/// Tile iterator which may be traverse in both directions along a defined sequence. +struct BidirectionalTileIteratorConcept : public ForwardTileIteratorConcept { + + CUTLASS_DEVICE + BidirectionalTileIteratorConcept & operator--(); ///< pre-decrement - traverse to previous tile in sequence + + CUTLASS_DEVICE + BidirectionalTileIteratorConcept operator--(int); ///< post-decrement - traverse to previous tile in sequence +}; +``` + +**_Random Access Tile Iterator Concept_.** This iterator defines random access operations in the logical +coordinate system of the underlying tensor. Thus, tensors must have a defined _Layout_ with associated +_TensorCoord_ coordinate describing logical position within the tensor and _TensorRef_ reference type. +It may be advanced forward or backwards by an offset specified as units of whole tiles along each dimension. +```c++ +/// Tile iterator offering random access to tiles in contiguous memory. +struct RandomAccessTileIteratorConcept : + public BidirectionalTileIteratorConcept, + public ContiguousMemoryTileIterator { + + using Layout; ///< Layout object mapping + using TensorRef; ///< Tensor Reference object + using TensorCoord; ///< Logical coordinate in referenced tensor + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + RandomAccessTileIteratorConcept & add_tile_offset(TensorCoord const &tile_offset); + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + RandomAccessTileIteratorConcept & operator+=(TensorCoord const &tile_offset); + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + RandomAccessTileIteratorConcept & operator-=(TensorCoord const &tile_offset); +}; +``` + +**_Readable Random Access Tile Iterator Concept_.** Readable random access iterators +accept an additional tile offset in logical coordinate space when loading fragments. +```c++ +/// Loads a fragment with a logical coordinate offset in units of whole tiles. +struct ReadableRandomAccessTileIteratorConcept : + public RandomAccessTileIteratorConcept, + public ReadableTileIteratorConcept { + + /// Loads a fragment from memory with logical offset in units of whole tiles. 
+ CUTLASS_DEVICE + void load( + Fragment &frag, ///< fragment to load from the tensor + TensorCoord const &tile_offset); ///< loads a tile with a logical offset in units of whole tiles +}; +``` + +**_Readable Random Access Contiguous Tile Iterator Concept_.** Readable random access iterators +accept an additional tile offset in logical coordinate space when loading fragments. +```c++ +/// Loads a fragment with a logical coordinate offset in units of whole tiles. +struct ReadableRandomAccessContiguousTileIteratorConcept : + public ReadableRandomAccessTileIteratorConcept, + ReadableContiguousTileIteratorConcept { + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + Fragment &frag, ///< fragment to load from the tensor + TensorCoord const &tile_offset, ///< loads a tile with a logical offset in units of whole tiles + Index pointer_offset); ///< loads a tile with a logical offset AND a pointer offset +}; +``` +**_Writeable Random Access Tile Iterator Concept_.** Writeable random access iterators +accept an additional tile offset in logical coordinate space when storing fragments. +```c++ +/// Stores a fragment with a logical coordinate offset in units of whole tiles. +struct WriteableRandomAccessTileIteratorConcept : + public RandomAccessTileIteratorConcept, + public WriteableContiguousTileIteratorConcept { + + /// Stores a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void store( + Fragment const &frag, ///< fragment to store to the location pointed to by the tensor + TensorCoord const &tile_offset); ///< stores a tile with a given offset from the current iterator +}; +``` + +**_Writeable Random Access Contiguous Tile Iterator Concept_.** Writeable random access iterators +accept an additional tile offset in logical coordinate space when storing fragments. +```c++ +/// Stores a fragment with a logical coordinate offset in units of whole tiles. +struct WriteableRandomAccessContiguousTileIteratorConcept : + public WriteableRandomAccessTileIteratorConcept, + public WriteableContiguousTileIteratorConcept { + + /// Stores a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void store( + Fragment const &frag, ///< fragment to store to the location pointed to by the tensor + TensorCoord const &tile_offset, ///< stores a tile with a logical offset in units of whole tiles + Index pointer_offset); ///< stores a tile witha logical offset AND a pointer offset +}; +``` + +**_Masked Tile Iterator Concept_.** Matrix and tensors may not always be multiples of whole tiles. +Masked tile iterators define a `Mask` type which may be used to guard accesses to memory. The +semantics and interface of this `Mask` are implementation-defined details of each tile iterator, +but several convenience methods are defined for interacting with the mask such as efficiently +clearing or enabling all guarded memory accesses. +```c++ +/// Supports iterating over tiles that are not 'whole' in memory. Iterator maintains a mask object +/// which guards against out-of-bounds access. +/// +/// Note, this concept definition does not formally define operations on the mask or methods it +/// supports. These remain implementation-dependent details of iterators implementing this concept. +struct MaskedTileIteratorConcept { + + using Mask; ///< mask object used to guard against acceses. 
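+              ///  (typically a packed array of per-access predicate bits)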
+ + CUTLASS_DEVICE void clear_mask(); ///< efficiently disables all accesses guarded by mask + CUTLASS_DEVICE void enable_mask(); ///< efficiently enables all accesses guarded by mask + + CUTLASS_DEVICE void get_mask(Mask &mask); ///< gets the mask + CUTLASS_DEVICE void set_mask(Mask const &mask); ///< sets the mask +}; +``` + +## Frequently Used Tile Iterator Concepts + +This section describes several frequently used compositions of the basic tile iterator concepts. They are +listed here as complete type declarations for convenience of the reader. + +**_Writeable, Readable, Forward, Contiguous Memory Tile Iterator Concept_.** +This combines several of the basic iterator concepts to +yield a tile iterator capable of loading and storing tiles as well as advancing forward along a traversal sequence. +```c++ +/// This tile iterator embodies several of the above: +/// +/// - ForwardTileIteratorConcept +/// - ReadableContiguousTileIteratorConcept +/// - WriteableContiguousTileIteratorConcept +/// +/// It is restated explicitly for convenience of the reader. +/// +struct WriteableReadableForwardContiguousTileIteratorConcept { + + // + // Data types + // + + using Element; ///< Element type composing tile. + using Shape; ///< Shape type describing extent of tile. The shape concept depends + /// on iterator implementation + using Index; ///< index type used as base for TensorCoord + using Fragment; ///< fragment object derived from cutlass::Array + + // + // Methods + // + + /// Adds a linear offset in units of Element to internal pointer(s) into tensor + CUTLASS_DEVICE + void add_pointer_offset(Index offset); + + /// true if iterators point to same tile, false if otherwise + CUTLASS_DEVICE bool operator==(WriteableReadableForwardContiguousTileIteratorConcept const &it); + + ///< false if iterators point to same tile, true if otherwise + CUTLASS_DEVICE bool operator!=(WriteableReadableForwardContiguousTileIteratorConcept const &it); + + /// pre-increment - traverse to next tile in sequence + CUTLASS_DEVICE + WriteableReadableForwardContiguousTileIteratorConcept & + operator++(); + + ///< post-increment - traverse to next tile in sequence + CUTLASS_DEVICE + WriteableReadableForwardContiguousTileIteratorConcept + operator++(int); + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment &frag); ///< fragment to be loaded from memory + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + Fragment &frag, ///< fragment to be loaded from memory + Index pointer_offset); ///< linear offset (in units of Element) when loading + + /// Stores a fragment to memory + CUTLASS_DEVICE + void store(Fragment const &frag); ///< fragment to store to memory + + /// Stores a fragment from memory with additional logical offset + CUTLASS_DEVICE + void store_with_pointer_offset( + Fragment const &frag, ///< fragment to store to memory + Index pointer_offset); ///< linear offset (in units of Element) when storing +}; +``` + +**_Writeable, Readable, Random Access, Contiguous Memory Tile Iterator Concept_.** +This combines several of the basic iterator concepts to +yield a tile iterator with random access suitable for loading matrix operands for GEMM. +```c++ +/// This tile iterator embodies several of the above: +/// +/// - ReadableRandomAccessContiguousTileIteratorConcept +/// - WriteableRandomAccessContiguousTileIteratorConcept +/// +/// It is restated explicitly for convenience of the reader. 
+/// +struct WriteableReadableRandomAccessContiguousTileIteratorConcept { + + // + // Data types + // + + using Element; ///< Element type composing tile. + using Shape; ///< Shape type describing extent of tile. The shape concept depends + /// on iterator implementation + using Layout; ///< Layout object mapping + using TensorRef; ///< Tensor Reference object + using TensorCoord; ///< Logical coordinate in referenced tensor + using Index; ///< index type used as base for TensorCoord + using Fragment; ///< fragment object derived from cutlass::Array + + // + // Methods + // + + /// Adds a linear offset in units of Element to internal pointer(s) into tensor + CUTLASS_DEVICE + void add_pointer_offset(Index pointer_offset); + + /// true if iterators point to same tile, false if otherwise + CUTLASS_DEVICE bool operator==(WriteableReadableRandomAccessContiguousTileIteratorConcept const &it); + + ///< false if iterators point to same tile, true if otherwise + CUTLASS_DEVICE bool operator!=(WriteableReadableRandomAccessContiguousTileIteratorConcept const &it); + + /// pre-increment - traverse to next tile in sequence + CUTLASS_DEVICE + WriteableReadableRandomAccessContiguousTileIteratorConcept & + operator++(); + + ///< post-increment - traverse to next tile in sequence + CUTLASS_DEVICE + WriteableReadableRandomAccessContiguousTileIteratorConcept + operator++(int); + + /// pre-decrement - traverse to previous tile in sequence + CUTLASS_DEVICE + WriteableReadableRandomAccessContiguousTileIteratorConcept & + operator--(); + + ///< post-decrement - traverse to previous tile in sequence + CUTLASS_DEVICE + WriteableReadableRandomAccessContiguousTileIteratorConcept + operator--(int); + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + WriteableReadableRandomAccessContiguousTileIteratorConcept & operator+=(TensorCoord const &tile_offset); + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + WriteableReadableRandomAccessContiguousTileIteratorConcept & operator-=(TensorCoord const &tile_offset); + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment &frag); ///< fragment to be loaded from memory + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + Fragment &frag, ///< fragment to be loaded from memory + Index pointer_offset); ///< linear offset (in units of Element) when loading + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + Fragment &frag, ///< fragment to be loaded from memory + TensorCoord const &tile_offset); ///< loads a tile with a logical offset in units of whole tiles + + /// Loads a fragment from memory with logical offset in units of whole tiles. 
+ CUTLASS_DEVICE + void load( + Fragment &frag, ///< fragment to be loaded from memory + TensorCoord const &tile_offset, ///< loads a tile with a logical offset in units of whole tiles + Index pointer_offset); ///< loads a tile with a logical offset AND a pointer offset + + /// Stores a fragment to memory + CUTLASS_DEVICE + void store(Fragment const &frag); ///< fragment to store to memory + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void store_with_pointer_offset( + Fragment const &frag, ///< fragment to store to memory + Index pointer_offset); ///< linear offset (in units of Element) when loading + + /// Stores a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void store( + Fragment const &frag, ///< fragment to store to memory + TensorCoord const &tile_offset); ///< stores with logical offset in units of whole tiles + + /// Stores a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void store( + Fragment const &frag, ///< fragment to store to memory + TensorCoord const &tile_offset, ///< stores with logical offset in units of whole tiles + Index pointer_offset); +}; +``` + +# Copyright + +Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +SPDX-License-Identifier: BSD-3-Clause + +``` + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +``` diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/utilities.md b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/utilities.md new file mode 100644 index 0000000000000000000000000000000000000000..c464f2007d5d5801bdec2ed923c474fd58dd78b5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/media/docs/utilities.md @@ -0,0 +1,417 @@ +![ALT](/media/images/gemm-hierarchy-with-epilogue-no-labels.png "CUTLASS Code Organization") + +[README](/README.md#documentation) > **CUTLASS Utilities** + +Note: This document discusses utilities commonly used with code that targets CUTLASS 2.x. 
+Although CUTLASS 3.0's primary entry point APIs do not transact in these `cutlass::*` tensor types anymore, +users can still find them convenient for managing allocations with trivial affine layouts. +For more advanced host side tensor management, [`cute::Tensor`](/media/docs/cute/03_tensor.md)s +can be used on either host or device for any memory space and full expressive power of +[`cute::Layout`](/media/docs/cute/01_layout.md)s. + +# CUTLASS Utilities + +CUTLASS utilities are additional template classes that facilitate recurring tasks. These are +flexible implementations of needed functionality, but they are not expected to be efficient. + +Applications should configure their builds to list `/tools/util/include` in their include +paths. + +Source code is in [`/tools/util/include/cutlass/util/`](/tools/util/include/cutlass/util). + +## Tensor Allocation and I/O + +To allocate a tensor with storage in both host and device memory, use `HostTensor` in +[`cutlass/util/host_tensor.h`](/tools/util/include/cutlass/util/host_tensor.h) + +```c++ +template +class HostTensor; +``` + +This class is compatible with all CUTLASS numeric data types and layouts. + +**Example:** column-major matrix storage of single-precision elements. +```c++ +#include +#include + +int main() { + int rows = 32; + int columns = 16; + + cutlass::HostTensor tensor({rows, columns}); + + return 0; +} +``` + +Internal host-side storage may be accessed via the following methods. +```c++ +float *host_ptr = tensor.host_data(); +cutlass::TensorRef host_ref = tensor.host_ref(); +cutlass::TensorView host_view = tensor.host_view(); +``` + +Device memory may be accessed similarly. +```c++ +float *device_ptr = tensor.device_data(); +cutlass::TensorRef device_ref = tensor.device_ref(); +cutlass::TensorView device_view = tensor.device_view(); +``` + +Printing to human-readable CSV output is accoplished with `std::ostream::operator<<()` defined in +[`cutlass/util/tensor_view_io.h`](/tools/util/include/cutlass/util/tensor_view_io.h). +Note, this assumes all views refer to host memory. +```c++ +#include + +int main() { + // Obtain a TensorView into host memory + cutlass::TensorView view = tensor.host_view(); + + // Print to std::cout + std::cout << view << std::endl; + + return 0; +} +``` + +Host and device memory must be explicitly synchronized by the application. +```c++ +float idx = 0; + +for (int i = 0; i < rows; ++i) { + for (int j = 0; j < columns; ++j) { + + // Write the element at location {i, j} in host memory + tensor.host_ref().at({i, j}) = idx; + + idx += 0.5f; + } +} + +// Copy host memory to device memory +tensor.sync_device(); + +// Obtain a device pointer usable in CUDA kernels +float *device_ptr = tensor.device_data(); +``` + +`HostTensor<>` is usable by all CUTLASS layouts including interleaved layouts. +```c++ +int rows = 4; +int columns = 3; + +cutlass::HostTensor> tensor({rows, columns}); + +for (int i = 0; i < rows; ++i) { + for (int j = 0; j < columns; ++j) { + + // Write the element at location {i, j} in host memory + tensor.host_ref().at({i, j}) = float(i) * 1.5f - float(j) * 2.25f; + } +} + +std::cout << tensor.host_view() << std::endl; +``` + +## Device Allocations + +To strictly allocate memory on the device using the smart pointer pattern to manage allocation and deallocation, +use `cutlass::DeviceAllocation<>`. + +**Example:** allocating an array in device memory. 
+```c++ +#include +#include +#include + +__global__ void kernel(float *device_ptr) { + +} + +int main() { + + size_t N = 1024; + + cutlass::DeviceAllocation device_alloc(N); + + // Call a CUDA kernel passing device memory as a pointer argument + kernel<<< grid, block >>>(alloc.get()); + + if (cudaGetLastError() != cudaSuccess) { + return -1; + } + + // Device memory is automatically freed when device_alloc goes out of scope + + return 0; +} +``` + +## Tensor Initialization + +CUTLASS defines several utility functions to initialize tensors to uniform, procedural, +or randomly generated elements. These have implementations using strictly host code and +implementations using strictly CUDA device code. + +`TensorFill()` for uniform elements throughout a tensor. +```c++ +#include +#include +#include +#include + +int main() { + int rows = 128; + int columns = 64; + + float x = 3.14159f; + + cutlass::HostTensor tensor({rows, columns}); + + // Initialize in host memory + cutlass::reference::host::TensorFill(tensor.host_view(), x); + + // Initialize in device memory + cutlass::reference::device::TensorFill(tensor.device_view(), x); + + return 0; +} +``` + +`TensorFillRandomUniform()` for initializing elements to a random uniform distribution. +The device-side implementation uses CURAND to generate random numbers. +```c++ +#include +#include +#include +#include + +int main() { + int rows = 128; + int columns = 64; + + double maximum = 4; + double minimum = -4; + uint64_t seed = 0x2019; + + cutlass::HostTensor tensor({rows, columns}); + + // Initialize in host memory + cutlass::reference::host::TensorFillRandomUniform( + tensor.host_view(), + seed, + maximum, + minimum); + + // Initialize in device memory + cutlass::reference::device::TensorFillRandomUniform( + tensor.device_view(), + seed, + maximum, + minimum); + + return 0; +} +``` + + +`TensorFillRandomGaussian()` for initializing elements to a random gaussian distribution. +The device-side implementation uses CURAND to generate random numbers. +```c++ +#include +#include +#include +#include + +int main() { + + int rows = 128; + int columns = 64; + + double mean = 0.5; + double stddev = 2.0; + uint64_t seed = 0x2019; + + cutlass::HostTensor tensor({rows, columns}); + + // Initialize in host memory + cutlass::reference::host::TensorFillRandomGaussian( + tensor.host_view(), + seed, + mean, + stddev); + + // Initialize in device memory + cutlass::reference::device::TensorFillRandomGaussian( + tensor.device_view(), + seed, + mean, + stddev); + + return 0; +} +``` + +Each of these functions accepts an additional argument to specify how many bits of +the mantissa less than 1 are non-zero. This simplifies functional comparisons when +exact random distributions are not necessary, since elements may be restricted to +integers or values with exact fixed-point representations. + +```c++ +#include +#include +#include +#include + +int main() { + + int rows = 128; + int columns = 64; + + double mean = 0.5; + double stddev = 2.0; + uint64_t seed = 0x2019; + + int bits_right_of_binary_decimal = 2; + + cutlass::HostTensor tensor({rows, columns}); + + // Initialize in host memory + cutlass::reference::host::TensorFillRandomGaussian( + tensor.host_view(), + seed, + mean, + stddev, + bits_right_of_binary_decimal); + + // Initialize in device memory + cutlass::reference::device::TensorFillRandomGaussian( + tensor.device_view(), + seed, + mean, + stddev, + bits_right_of_binary_decimal); + + return 0; +} +``` + +These utilities may be used for all data types. 
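+
+After a tensor has been initialized in device memory, the host-side copy can be refreshed before
+printing or verification. A short sketch using the `HostTensor` synchronization and I/O facilities shown above:
+
+```c++
+// `tensor` is assumed to be a HostTensor that was just filled on the device,
+// e.g. by cutlass::reference::device::TensorFillRandomGaussian() above.
+
+// Copy device memory back into host memory
+tensor.sync_host();
+
+// Print the newly initialized elements from the host-side view
+std::cout << tensor.host_view() << std::endl;
+```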
+ +**Example:** random half-precision tensor with Gaussian distribution. +```c++ +#include +#include +#include +#include +#include + +int main() { + int rows = 128; + int columns = 64; + + double mean = 0.5; + double stddev = 2.0; + uint64_t seed = 0x2019; + + // Allocate a column-major tensor with half-precision elements + cutlass::HostTensor tensor({rows, columns}); + + // Initialize in host memory + cutlass::reference::host::TensorFillRandomGaussian( + tensor.host_view(), + seed, + mean, + stddev); + + // Initialize in device memory + cutlass::reference::device::TensorFillRandomGaussian( + tensor.device_view(), + seed, + mean, + stddev); + + return 0; +} +``` + +## Reference Implementations + +CUTLASS defines reference implementations usable with all data types and layouts. These are +used throughout the unit tests. + +**Example:** Reference GEMM implementation with mixed precision internal computation. +```c++ +#include +#include + +#include +#include + +int main() { + + int M = 64; + int N = 32; + int K = 16; + + float alpha = 1.5f; + float beta = -1.25f; + + cutlass::HostTensor A({M, K}); + cutlass::HostTensor B({K, N}); + cutlass::HostTensor C({M, N}); + + cutlass::reference::host::Gemm< + cutlass::half_t, cutlass::layout::ColumnMajor, // ElementA and LayoutA + cutlass::half_t, cutlass::layout::ColumnMajor, // ElementB and LayoutB + cutlass::half_t, cutlass::layout::ColumnMajor, // ElementC and LayoutC + float, // scalar type (alpha and beta) + float> gemm_op; // internal accumulation type + + gemm_op( + {M, N, K}, // problem size + alpha, // alpha scalar + A.host_view(), // TensorView to host memory + B.host_view(), // TensorView to host memory + beta, // beta scalar + C.host_view(), // TensorView to host memory + D.host_view()); // TensorView to device memory + + return 0; +} +``` + +# Copyright + +Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +SPDX-License-Identifier: BSD-3-Clause + +``` + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+```
diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/README.md b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..63388b077def6c06b396915786dbd936399fa2b7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/README.md
@@ -0,0 +1,173 @@
+![ALT](/media/images/gemm-hierarchy-with-epilogue-no-labels.png "Complete CUDA GEMM decomposition")
+
+# Python packages associated with CUTLASS
+This directory contains Python packages that are associated with CUTLASS:
+
+* `cutlass`: the CUTLASS Python interface, which enables one to compile and run CUTLASS kernels from within Python
+* `cutlass_library`: utilities used for enumerating and emitting C++ code for CUTLASS kernels
+
+## CUTLASS Python Interface
+The CUTLASS Python interface enables one to compile and run CUTLASS operations from within Python.
+
+```python
+import cutlass
+import numpy as np
+
+plan = cutlass.op.Gemm(element=np.float16, layout=cutlass.LayoutType.RowMajor)
+A, B, C, D = [np.ones((4096, 4096), dtype=np.float16) for i in range(4)]
+plan.run(A, B, C, D)
+```
+
+**NOTE:** The CUTLASS Python interface is currently an experimental release. The API may change in the future.
+We welcome feedback from the community.
+
+### Overview
+The CUTLASS Python interface aims to provide an easy-to-use interface for using CUTLASS via Python. Toward this goal,
+the CUTLASS Python interface attempts to:
+
+* Present high-level interfaces for operators that require only a few parameters
+* Select sensible default configurations for an operator given the parameters that have been specified
+* Enumerate configurations for users that are known to work in a given setting
+* Reduce the occurrence of C++ compile-time errors in favor of descriptive Python exceptions
+* Make it easy to export CUTLASS kernels to framework extensions (e.g., PyTorch CUDA extensions)
+
+#### Non-goals
+The CUTLASS Python interface is not intended to:
+
+**Select optimal kernel configurations.**
+As an ease-of-use interface, the default selections for operator parameters made by the CUTLASS Python interface may
+not achieve the highest possible performance in all scenarios. Users wishing to achieve the highest performance possible
+should consider profiling different combinations of configuration parameters, or use a library such as [cuBLAS](https://developer.nvidia.com/cublas)
+that contains heuristics for selecting kernels.
+
+**Act as a fast container for CUTLASS kernels.**
+The CUTLASS Python interface does not strive to minimize overhead in its Python functions surrounding the running of a kernel.
+Those wishing to deploy a CUTLASS kernel should consider either using the C++ emitted by the Python interface directly, or using
+one of the CUTLASS emitters for automatically creating a framework extension for the kernel (e.g., a PyTorch CUDA extension).
+
+**Act as a Python-to-CUDA-kernel JIT compilation engine.**
+The CUTLASS Python interface intends to enable one to use CUTLASS via Python. It can be used by frameworks for JIT compiling
+Python to CUDA kernels, but does not set out to be such a framework.
+
+#### Comparison to PyCUTLASS
+The CUTLASS Python interface builds atop CUTLASS's [PyCUTLASS](https://github.com/NVIDIA/cutlass/tree/v3.0.0/tools/library/scripts/pycutlass) library.
+
+#### Non-goals
+The CUTLASS Python interface is not intended to:
+
+**Select optimal kernel configurations.**
+As an easy-to-use interface, the default selections for operator parameters made by the CUTLASS Python interface may
+not achieve the highest possible performance in all scenarios. Users wishing to achieve the highest performance possible
+should consider profiling different combinations of configuration parameters, or use a library such as [cuBLAS](https://developer.nvidia.com/cublas)
+that contains heuristics for selecting kernels.
+
+**Act as a fast container for CUTLASS kernels.**
+The CUTLASS Python interface does not strive to minimize overhead in its Python functions surrounding the running of a kernel.
+Those wishing to deploy a CUTLASS kernel should consider either using the C++ emitted by the Python interface directly, or using
+one of the CUTLASS emitters for automatically creating a framework extension for the kernel (e.g., a PyTorch CUDA extension).
+
+**Act as a Python-to-CUDA-kernel JIT compilation engine.**
+The CUTLASS Python interface intends to enable one to use CUTLASS via Python. It can be used by frameworks for JIT compiling
+Python to CUDA kernels, but does not set out to be such a framework.
+
+#### Comparison to PyCUTLASS
+The CUTLASS Python interface builds atop CUTLASS's [PyCUTLASS](https://github.com/NVIDIA/cutlass/tree/v3.0.0/tools/library/scripts/pycutlass) library. PyCUTLASS enables
+one to declare, compile, and run GEMMs, convolutions, and grouped GEMM operators with nearly the same configuration
+space as CUTLASS's C++ interface. While this flexibility enables one to achieve levels of functionality similar to
+those available in CUTLASS's C++ interface, it comes with the burden of needing to specify many configuration parameters
+to operators -- similar to what one must do in specifying template parameters to operations in CUTLASS's C++ interface.
+
+In contrast, the CUTLASS Python interface aims to provide a higher-level API for declaring, emitting, and compiling
+kernels that does not require exhaustively defining template parameters.
+
+### Current functionality
+The CUTLASS Python interface currently supports the following operations:
+* GEMMs
+* GEMMs with fused elementwise epilogues (e.g., ReLU) (for pre-SM90 kernels)
+* Stream K swizzling (for pre-SM90 kernels)
+* Grouped GEMM (for pre-SM90 kernels)
+
+### Getting started
+We recommend using the CUTLASS Python interface via one of the Docker images located in the [docker](/python/docker) directory.
+
+```bash
+docker build -t cutlass-cuda12.1:latest -f docker/Dockerfile-cuda12.1-pytorch .
+docker run --gpus all -it --rm cutlass-cuda12.1:latest
+```
+
+The CUTLASS Python interface has been tested with CUDA 11.8, 12.0, and 12.1 on Python 3.8.10 and 3.9.7.
+
+#### Optional environment variables
+Prior to installing the CUTLASS Python interface, one may optionally set the following environment variables:
+* `CUTLASS_PATH`: the path to the cloned CUTLASS repository
+* `CUDA_INSTALL_PATH`: the path to the installation of CUDA
+
+If these environment variables are not set, the installation process will infer them to be the following:
+* `CUTLASS_PATH`: one directory level above the current directory (i.e., `$(pwd)/..`)
+* `CUDA_INSTALL_PATH`: the directory holding `/bin/nvcc` for the first version of `nvcc` on `$PATH` (i.e., `which nvcc | awk -F'/bin/nvcc' '{print $1}'`)
+
+**NOTE:** The version of `cuda-python` installed must match the CUDA version in `CUDA_INSTALL_PATH`.
+
+#### Installation
+The CUTLASS Python interface can currently be installed via:
+```bash
+python setup.py develop --user
+```
+This will allow changes to the Python interface source to be reflected when using the Python interface.
+
+We plan to add support for installing via `python setup.py install` in a future release.
+
+### Examples
+Jupyter notebook examples of using the CUTLASS Python interface are located in [examples/python](/examples/python).
+
+To launch these notebooks from this directory, run:
+```bash
+jupyter-lab ../examples/python
+```
+
+### Building documentation
+The CUTLASS Python interface uses [Sphinx](https://www.sphinx-doc.org/en/master/) for documentation.
+
+Building the documentation requires additional packages. These can be installed via:
+```bash
+sudo apt-get install pandoc
+pip install --upgrade Sphinx furo pandoc myst-parser sphinx-copybutton nbsphinx nbsphinx-link sphinx-inline-tabs
+```
+
+To build documentation, you must first have installed the CUTLASS Python interface via the
+[installation instructions](#installation).
+
+Documentation can then be built via the following commands:
+```bash
+sphinx-apidoc -o docs_src/source/ cutlass/ cutlass/backend*
+cd docs_src
+make html
+mv _build/* ../docs
+```
+
+## CUTLASS library package
+[cutlass_library](/python/cutlass_library) contains utilities for enumerating and emitting CUTLASS C++ kernels.
+It is used by the CUTLASS CMake system to construct a library of kernels that can be profiled using the CUTLASS profiler. + +To install the `cutlass_library` package, run +```bash +python setup_library.py develop --user +``` + +Alternatively, `cutlass_library` will automatically be installed if you install the CUTLASS Python interface package. + +You can also use the [generator.py](/python/cutlass_library/generator.py) script directly without installing the module via: +```bash +python -m cutlass_library.generator +``` + +# Copyright + +Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +SPDX-License-Identifier: BSD-3-Clause + +``` + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+``` diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/__pycache__/setup.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/__pycache__/setup.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19eee3c672bf6218163a196aa612656557f68842 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/__pycache__/setup.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/__pycache__/setup_library.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/__pycache__/setup_library.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5e32598dee24b1c3296c755fd684a1d0d7c1fc81 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/__pycache__/setup_library.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/__pycache__/setup_pycute.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/__pycache__/setup_pycute.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e0351280adcfcfad46497dabbf99b311b916e5b7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/__pycache__/setup_pycute.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/__init__.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..39e9b4076f68a8c28b98cc7856c3cc96c52f0715 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/__init__.py @@ -0,0 +1,149 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################################# + +import logging +import os +import sys + +import cutlass_library + + +def _cutlass_path_from_dir() -> str: + cutlass_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../') + if not os.path.isdir(cutlass_path): + raise Exception(f'Environment variable "CUTLASS_PATH" is not defined, ' + f'and default path of {cutlass_path} does not exist.') + return cutlass_path + + +def _cuda_install_path_from_nvcc() -> str: + import subprocess + # Attempt to detect CUDA_INSTALL_PATH based on location of NVCC + result = subprocess.run(['which', 'nvcc'], capture_output=True) + if result.returncode != 0: + raise Exception(f'Unable to find nvcc via `which` utility.') + + cuda_install_path = result.stdout.decode('utf-8').split('/bin/nvcc')[0] + if not os.path.isdir(cuda_install_path): + raise Exception(f'Environment variable "CUDA_INSTALL_PATH" is not defined, ' + f'and default path of {cuda_install_path} does not exist.') + + return cuda_install_path + + +CUTLASS_PATH = os.getenv("CUTLASS_PATH", _cutlass_path_from_dir()) +CUDA_INSTALL_PATH = os.getenv("CUDA_INSTALL_PATH", _cuda_install_path_from_nvcc()) +CACHE_FILE = "compiled_cache.db" + +# Import types/methods from the CUTLASS utility libraries for profiler generation/emission under +from cutlass_library.library import ( + ArchitectureNames, + ComplexTransform, + ComplexTransformTag, + ConvKind, + ConvKindNames, + ConvKindTag, + ConvMode, + DataType, + DataTypeNames, + DataTypeSize, + DataTypeTag, + EpilogueFunctor, + EpilogueScheduleSuffixes, + EpilogueScheduleTag, + EpilogueScheduleType, + GemmKind, + GemmKindNames, + GemmUniversalMode, + IteratorAlgorithm, + IteratorAlgorithmNames, + IteratorAlgorithmTag, + LayoutTag, + LayoutType, + KernelScheduleSuffixes, + KernelScheduleTag, + KernelScheduleType, + MathInstruction, + MathOperation, + MathOperationTag, + OpcodeClass, + OpcodeClassNames, + OpcodeClassTag, + OperationKind, + SharedMemPerCC, + ShortComplexLayoutNames, + ShortDataTypeNames, + ShortLayoutTypeNames, + SplitKMode, + StrideSupport, + StrideSupportNames, + StrideSupportTag, + SwizzlingFunctor, + SwizzlingFunctorTag, + TensorDescription, + TileDescription, + TileSchedulerSuffixes, + TileSchedulerTag, + TileSchedulerType, + get_complex_from_real, +) + +this = sys.modules[__name__] +this.logger = logging.getLogger(__name__) + +def set_log_level(level: int): + """ + Sets the log level + + :param log_level: severity of logging level to use. 
See https://docs.python.org/3/library/logging.html#logging-levels for options + :type log_level: int + """ + this.logger.setLevel(level) + +set_log_level(logging.ERROR) + +from cutlass.library_defaults import OptionRegistry +from cutlass.backend.utils.device import device_cc + +this.option_registry = OptionRegistry(device_cc()) + +this.__version__ = '3.2.1' + +from cutlass.backend import get_memory_pool +from cutlass.emit.pytorch import pytorch +from cutlass.op.gemm import Gemm +from cutlass.op.conv import Conv2d, Conv2dFprop, Conv2dDgrad, Conv2dWgrad +from cutlass.op.gemm_grouped import GroupedGemm +from cutlass.op.op import OperationBase +from cutlass.backend.evt.ir.tensor import Tensor + +get_memory_pool(init_pool_size=2 ** 30, max_pool_size=2 ** 32) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4b916eb270f6037781b4e180e25ff3c487ebb005 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/__pycache__/library_defaults.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/__pycache__/library_defaults.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf09ea27e548bb9e898cde4b53b73772ae01732e Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/__pycache__/library_defaults.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/__pycache__/shape.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/__pycache__/shape.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a035cfb0d43146b6c93891cdda1f1db85425953b Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/__pycache__/shape.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/__pycache__/swizzle.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/__pycache__/swizzle.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..099573187d4dee6b988aa84d3f39f03c41975be3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/__pycache__/swizzle.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/__init__.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9b94c78d500f1541598976592232cf64fc8d19e2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/__init__.py @@ -0,0 +1,22 @@ +from cutlass.backend.arguments import * +from cutlass.backend.c_types import * +from cutlass.backend.compiler import ArtifactManager +from cutlass.backend.conv2d_operation import * +from cutlass.backend.epilogue import * +from cutlass.backend.frontend import * +from cutlass.backend.gemm_operation import * +from cutlass.backend.library import * +from 
cutlass.backend.memory_manager import PoolMemoryManager +from cutlass.backend.operation import * +from cutlass.backend.reduction_operation import * +from cutlass.backend.type_hint import * +from cutlass.backend.utils import * +from cutlass.backend.utils.device import device_cc +from cutlass.backend.utils.software import ( + CheckPackages, + SubstituteTemplate, + device_sm_count, + get_memory_pool, +) + +compiler = ArtifactManager() diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..03d49025a49b090f789333f53f5e2872e4387a73 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/__pycache__/arguments.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/__pycache__/arguments.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91cb68b6bcc10eff690b021441d7f3b79bb73037 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/__pycache__/arguments.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/__pycache__/c_types.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/__pycache__/c_types.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..114f768834e7a93a6dcd417b8dc8d03e028c9088 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/__pycache__/c_types.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/__pycache__/compiler.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/__pycache__/compiler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9361b75ae3416d5b17ce372746a71289388b4d8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/__pycache__/compiler.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/__pycache__/conv2d_operation.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/__pycache__/conv2d_operation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0717fb813a3c14dcfa8571ebc52e502dbb879adf Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/__pycache__/conv2d_operation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/__pycache__/epilogue.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/__pycache__/epilogue.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f361757d16756862e923b9615d2b4d7ebc043bd7 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/__pycache__/epilogue.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/__pycache__/frontend.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/__pycache__/frontend.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..086bfdda7fbd61d37fe9484e3731ce82efa40fe4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/__pycache__/frontend.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/__pycache__/gemm_operation.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/__pycache__/gemm_operation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..22b976c0182968d8c49d49fcbfbc183109cb37d9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/__pycache__/gemm_operation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/__pycache__/library.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/__pycache__/library.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..340a099545401f591f53b40720bdff942c2ec762 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/__pycache__/library.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/__pycache__/memory_manager.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/__pycache__/memory_manager.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e8ba5b08fd1eb298928c55045193f5930888fe00 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/__pycache__/memory_manager.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/__pycache__/operation.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/__pycache__/operation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a792771a9734f265da7545e52e1d1ab9da7a01c Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/__pycache__/operation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/__pycache__/reduction_operation.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/__pycache__/reduction_operation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fc5edae0cfbb7e827a497d219164729805000099 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/__pycache__/reduction_operation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/__pycache__/type_hint.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/__pycache__/type_hint.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e117f90d5fd6d8834c0c672bb07437941130a0c Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/__pycache__/type_hint.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/arguments.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/arguments.py new file mode 100644 index 0000000000000000000000000000000000000000..20a01e6267b96bc1a0bf5097b059e54f21548cd1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/arguments.py @@ -0,0 +1,121 @@ +################################################################################################# +# +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +################################################################################################# + +from math import prod +from typing import Union + +from cuda import cuda, cudart +import numpy as np + +from cutlass.backend.frontend import CupyFrontend, NumpyFrontend, TorchFrontend +from cutlass.backend.utils.software import CheckPackages + +torch_available = CheckPackages().check_torch() +if torch_available: + import torch + +cupy_available = CheckPackages().check_cupy() +if cupy_available: + import cupy as cp + + +class ArgumentBase: + """ + Base class for operation arguments + """ + + def __init__( + self, + A: "Union[cuda.CUdeviceptr, np.ndarray, torch.Tensor, cp.ndarray]", + B: "Union[cuda.CUdeviceptr, np.ndarray, torch.Tensor, cp.ndarray]", + C: "Union[cuda.CUdeviceptr, np.ndarray, torch.Tensor, cp.ndarray]", + D: "Union[cuda.CUdeviceptr, np.ndarray, torch.Tensor, cp.ndarray]", + **kwargs, + ) -> None: + # tensor_C can be interpreted as the bias with bias=True in keyword args + if "bias" in kwargs.keys(): + self.bias = kwargs["bias"] + else: + # by default, tensor_C is not bias + self.bias = False + + # RMM buffers used to track tensor lifetime + self.buffers = {} + # Host tensor to copy the computed result back + self.host_tensors = {} + + self.ptr_A = self.tensor_to_ptr(A, "A") + self.ptr_B = self.tensor_to_ptr(B, "B") + self.ptr_C = self.tensor_to_ptr(C, "C") + self.ptr_D = self.tensor_to_ptr(D, "D", True) + if C is not None: + if not isinstance(C, cuda.CUdeviceptr): + self.tensor_c_numel = prod(C.shape) + + def tensor_to_ptr(self, tensor, name, is_output=False): + """ + Convert and remember the input tensor to cuda.CUdeviceptr used by cuda python + For numpy.ndarray, it also remembers the host buffer for synchronization + """ + if tensor is None: + return cuda.CUdeviceptr(0) + if isinstance(tensor, np.ndarray): + if is_output: + assert name + self.buffers[name] = NumpyFrontend.argument(tensor, is_output) + if is_output: + self.host_tensors[name] = tensor + return self.buffers[name].ptr + elif torch_available and isinstance(tensor, torch.Tensor): + return TorchFrontend.argument(tensor) + elif isinstance(tensor, cuda.CUdeviceptr): + return tensor + elif cupy_available and isinstance(tensor, cp.ndarray): + return CupyFrontend.argument(tensor) + else: + raise TypeError("Unsupported Frontend. Only support numpy and torch") + + def sync(self, stream_sync=True): + if stream_sync: + (err,) = cudart.cudaDeviceSynchronize() + if err != cuda.CUresult.CUDA_SUCCESS: + raise RuntimeError("CUDA Error %s" % str(err)) + + for key in self.host_tensors.keys(): + host_tensor = self.host_tensors[key] + (err,) = cuda.cuMemcpyDtoH( + host_tensor, + self.buffers[key].ptr, + host_tensor.size * host_tensor.itemsize, + ) + if err != cuda.CUresult.CUDA_SUCCESS: + raise RuntimeError("CUDA Error %s" % str(err)) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/c_types.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/c_types.py new file mode 100644 index 0000000000000000000000000000000000000000..73d0c66d94e3556384d3f903fd7f12a1e9cfd8aa --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/c_types.py @@ -0,0 +1,610 @@ +################################################################################################# +# +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################################# + +import ctypes + +from cutlass import ( + DataType, + KernelScheduleType +) +from cutlass.backend.library import DataTypeSizeBytes + + +class GemmCoord_(ctypes.Structure): + _fields_ = [ + ("m", ctypes.c_int), + ("n", ctypes.c_int), + ("k", ctypes.c_int) + ] + + def __init__(self, m, n, k) -> None: + self.m = m + self.n = n + self.k = k + + +class GemmCoordBatched_(ctypes.Structure): + """ + Wrapper around a GemmCoord that also contains batch count. This is used for encoding + batched GEMM inputs to CUTLASS 3 GEMMs. + """ + + _fields_ = [ + ("m", ctypes.c_int), + ("n", ctypes.c_int), + ("k", ctypes.c_int), + ("batch_count", ctypes.c_int) + ] + + def __init__(self, gemm_coord, batch_count) -> None: + self.m = gemm_coord.m + self.n = gemm_coord.n + self.k = gemm_coord.k + self.batch_count = batch_count + + +class MatrixCoord_(ctypes.Structure): + _fields_ = [ + ("row", ctypes.c_int), + ("column", ctypes.c_int) + ] + + +class dim3_(ctypes.Structure): + _fields_ = [ + ("x", ctypes.c_int), + ("y", ctypes.c_int), + ("z", ctypes.c_int) + ] + + +class StrideBatched_(ctypes.Structure): + """ + CUTLASS 3.0 strides for operands contain one static dimension and two variable dimensions. The + variable dimensions represent the stride along non-unit-stride dimension of the row/column major + layout, and the batch stride. This structure encodes the two variable dimensions. + """ + _fields_ = [ + ("major_stride", ctypes.c_int64), + ("batch_stride", ctypes.c_int64) + ] + + +class GenericMainloopArguments3x_(ctypes.Structure): + """ + Structure representing the superset of possible mainloop arguments. + This structure should not be passed to kernels directly, but, rather, + be used as an input to one of the more specific schedule arguments, which + will each select those arguments relevant to the particular schedule. 
+ """ + _fields_ = [ + ("ptr_A", ctypes.c_void_p), + ("stride_A", StrideBatched_), + ("ptr_B", ctypes.c_void_p), + ("stride_B", StrideBatched_), + ("mma_promotion_interval", ctypes.c_int) + ] + + +def get_mainloop_arguments_3x( + kernel_schedule: KernelScheduleType, + element_A, + element_B, + alignment_A: int, + alignment_B: int) -> ctypes.Structure: + """ + Returns the ctypes structure to be used for the 3.x kernel's mainloop parameters. + + :param kernel_schedule: type of kernel schedule to be used in the mainloop + :type kerel_schedule: cutlass.KernelScheduleType + :param element_A: data type of operand A + :param element_B: data type of operand B + :param alignment_A: alignment of operand A + :type alignment_A: int + :param alignment_B: alignment of operand B + :type alignment_B: int + + :returns: ctypes structure to be used for the 3.x kernel's mainloop parameters + :rtype: ctypes.Structure + """ + class _MainloopArgumentsTma(ctypes.Structure): + _fields_ = [ + ("ptr_A", ctypes.c_void_p), + ("stride_A", StrideBatched_), + ("ptr_B", ctypes.c_void_p), + ("stride_B", StrideBatched_), + ("mma_promotion_interval", ctypes.c_int) + ] + + @staticmethod + def from_generic_mainloop_args(args: GenericMainloopArguments3x_): + return _MainloopArgumentsTma( + args.ptr_A, args.stride_A, args.ptr_B, args.stride_B, + args.mma_promotion_interval + ) + + class _MainloopArgumentsMultistage(ctypes.Structure): + _fields_ = [ + ("ptr_A", ctypes.c_void_p), + ("stride_A", StrideBatched_), + ("ptr_B", ctypes.c_void_p), + ("stride_B", StrideBatched_), + ] + + @staticmethod + def from_generic_mainloop_args(args: GenericMainloopArguments3x_): + return _MainloopArgumentsMultistage( + args.ptr_A, args.stride_A, args.ptr_B, args.stride_B, + ) + + tma_alignment_bytes = 16 + is_tma_aligned_A = ((DataTypeSizeBytes[element_A] * alignment_A) % tma_alignment_bytes) == 0 + is_tma_aligned_B = ((DataTypeSizeBytes[element_B] * alignment_B) % tma_alignment_bytes) == 0 + is_tma_aligned = is_tma_aligned_A and is_tma_aligned_B + + if kernel_schedule == KernelScheduleType.Multistage: + return _MainloopArgumentsMultistage + elif kernel_schedule == KernelScheduleType.ScheduleAuto: + if is_tma_aligned: + return _MainloopArgumentsTma + else: + return _MainloopArgumentsMultistage + else: + if is_tma_aligned: + return _MainloopArgumentsTma + else: + raise Exception(f"Specified a kernel schedule using TMA ({kernel_schedule}), but " + "the provided data types and alignments are not properly aligned for " + "using TMA.") + + +def get_gemm_arguments_3x(mainloop_arguments, epilogue_functor): + _EpilogueOutputOpParams = epilogue_functor.epilogue_type + if hasattr(epilogue_functor, "visitor"): + class _EpilogueArguments(ctypes.Structure): + _fields_ = [ + ("epilogue", _EpilogueOutputOpParams), + ("arg_C", epilogue_functor.arg_c_type), + ("arg_D", epilogue_functor.arg_d_type) + ] + + def __init__(self, output_op, ptr_c, stride_c, ptr_d, stride_d) -> None: + self.epilogue = output_op + self.arg_C = epilogue_functor.arg_c_type(ptr_c) + self.arg_D = epilogue_functor.arg_d_type(ptr_d) + else: + + class _EpilogueArguments(ctypes.Structure): + _fields_ = [ + ("epilogue", _EpilogueOutputOpParams), + ("ptr_C", ctypes.c_void_p), + ("stride_C", StrideBatched_), + ("ptr_D", ctypes.c_void_p), + ("stride_D", StrideBatched_), + ] + + class _HardwareInfo(ctypes.Structure): + _fields_ = [ + ("device_id", ctypes.c_int), + ("sm_count", ctypes.c_int) + ] + + class _GemmArguments(ctypes.Structure): + _fields_ = [ + ("mode", ctypes.c_int), + ("problem_size", 
GemmCoordBatched_), + ("mainloop", mainloop_arguments), + ("epilogue", _EpilogueArguments), + ("hw_info", _HardwareInfo), + ("splits", ctypes.c_int) + ] + + return _GemmArguments, _EpilogueArguments, _EpilogueOutputOpParams, _HardwareInfo + + +def get_gemm_arguments(epilogue_functor): + _EpilogueOutputOpParams = epilogue_functor.epilogue_type + + class _GemmArguments(ctypes.Structure): + _fields_ = [ + # Arguments from UniversalArgumentsBase + ("mode", ctypes.c_int), + ("problem_size", GemmCoord_), + ("batch_count", ctypes.c_int), + ("batch_stride_D", ctypes.c_longlong), + # Remaining arguments + ("epilogue", _EpilogueOutputOpParams), + ("ptr_A", ctypes.c_void_p), + ("ptr_B", ctypes.c_void_p), + ("ptr_C", ctypes.c_void_p), + ("ptr_D", ctypes.c_void_p), + ("batch_stride_A", ctypes.c_longlong), + ("batch_stride_B", ctypes.c_longlong), + ("batch_stride_C", ctypes.c_longlong), + ("stride_a", ctypes.c_longlong), + ("stride_b", ctypes.c_longlong), + ("stride_c", ctypes.c_longlong), + ("stride_d", ctypes.c_longlong), + ("lda", ctypes.c_longlong), + ("ldb", ctypes.c_longlong), + ("ldc", ctypes.c_longlong), + ("ldd", ctypes.c_longlong), + ("ptr_gather_A_indices", ctypes.c_void_p), + ("ptr_gather_B_indices", ctypes.c_void_p), + ("ptr_scatter_D_indices", ctypes.c_void_p) + ] + + return _GemmArguments, _EpilogueOutputOpParams + + +def get_gemm_arguments_streamk(epilogue_functor): + _EpilogueOutputOpParams = epilogue_functor.epilogue_type + + class _GemmArguments(ctypes.Structure): + _fields_ = [ + ("mode", ctypes.c_int), + ("problem_size", GemmCoord_), + ("batch_count", ctypes.c_int), + ("epilogue", _EpilogueOutputOpParams), + ("ptr_A", ctypes.c_void_p), + ("ptr_B", ctypes.c_void_p), + ("ptr_C", ctypes.c_void_p), + ("ptr_D", ctypes.c_void_p), + ("batch_stride_A", ctypes.c_longlong), + ("batch_stride_B", ctypes.c_longlong), + ("batch_stride_C", ctypes.c_longlong), + ("batch_stride_D", ctypes.c_longlong), + ("stride_a", ctypes.c_longlong), + ("stride_b", ctypes.c_longlong), + ("stride_c", ctypes.c_longlong), + ("stride_d", ctypes.c_longlong), + ("lda", ctypes.c_longlong), + ("ldb", ctypes.c_longlong), + ("ldc", ctypes.c_longlong), + ("ldd", ctypes.c_longlong), + ("avail_sms", ctypes.c_int) + ] + + return _GemmArguments, _EpilogueOutputOpParams + + +########################################################################################### +# GEMM Grouped +########################################################################################### + + +def get_gemm_grouped_arguments(epilogue_functor): + _EpilogueOutputOpParams = epilogue_functor.epilogue_type + + class _GEMMGroupedArguments(ctypes.Structure): + _fields_ = [ + ("problem_sizes", ctypes.c_void_p), + ("problem_count", ctypes.c_int), + ("threadblock_count", ctypes.c_int), + ("output_op", _EpilogueOutputOpParams), + ("ptr_A", ctypes.c_void_p), + ("ptr_B", ctypes.c_void_p), + ("ptr_C", ctypes.c_void_p), + ("ptr_D", ctypes.c_void_p), + ("lda", ctypes.c_void_p), + ("ldb", ctypes.c_void_p), + ("ldc", ctypes.c_void_p), + ("ldd", ctypes.c_void_p), + ("host_problem_sizes", ctypes.c_void_p) + ] + + return _GEMMGroupedArguments, _EpilogueOutputOpParams + + +############################################################################################ +# Convolution2D +############################################################################################ + + +class Conv2DProblemSize_(ctypes.Structure): + _fields_ = [ + ("N", ctypes.c_int), + ("H", ctypes.c_int), + ("W", ctypes.c_int), + ("C", ctypes.c_int), + ("P", ctypes.c_int), + ("Q", 
ctypes.c_int), + ("K", ctypes.c_int), + ("R", ctypes.c_int), + ("S", ctypes.c_int), + ("pad_h", ctypes.c_int), + ("pad_w", ctypes.c_int), + ("stride_h", ctypes.c_int), + ("stride_w", ctypes.c_int), + ("dilation_h", ctypes.c_int), + ("dilation_w", ctypes.c_int), + ("mode", ctypes.c_int), # kCrossCorrelation: 0, kConvolution: 1 + ("split_k_slices", ctypes.c_int), + ("groups", ctypes.c_int) + ] + + def __init__(self, problem_size) -> None: + for field_name, _ in self._fields_: + setattr(self, field_name, getattr(problem_size, field_name)) + + +class Layout4D(ctypes.Structure): + _fields_ = [("stride", ctypes.c_int * 3)] + + def __init__(self, tensor_ref): + stride = tensor_ref.stride() + setattr(self, "stride", (stride.at(0), stride.at(1), stride.at(2))) + + +class TensorRef_(ctypes.Structure): + _fields_ = [ + ("ptr", ctypes.c_void_p), + ("layout", Layout4D) + ] + + def __init__(self, tensor_ref): + setattr(self, "ptr", tensor_ref.data()) + setattr(self, "layout", Layout4D(tensor_ref.layout())) + + +class TensorRef2D_(ctypes.Structure): + _fields_ = [ + ("ptr", ctypes.c_void_p), + ("stride", ctypes.c_int) + ] + + +def get_conv2d_arguments(epilogue_functor): + _EpilogueOutputOpParams = epilogue_functor.epilogue_type + + class _Conv2dArguments(ctypes.Structure): + _fields_ = [ + ("conv_kind", ctypes.c_int), + ("problem_size", Conv2DProblemSize_), + ("ptr_A", ctypes.c_void_p), + ("ptr_B", ctypes.c_void_p), + ("ptr_C", ctypes.c_void_p), + ("ptr_D", ctypes.c_void_p), + ("tensor_C_numel", ctypes.c_int), + ("output_op", _EpilogueOutputOpParams), + ("split_k_mode", ctypes.c_int) + ] + + return _Conv2dArguments, _EpilogueOutputOpParams + + +############################################################################################ +# Reduction +############################################################################################ + + +def get_reduction_params(epilogue_functor): + _EpilogueOutputParams = epilogue_functor.epilogue_type + + class _ReductionParams(ctypes.Structure): + _fields_ = [ + ("problem_size", MatrixCoord_), + ("partitions", ctypes.c_int), + ("partition_stride", ctypes.c_longlong), + ("workspace", TensorRef2D_), + ("destination", TensorRef2D_), + ("source", TensorRef2D_), + ("output_op", _EpilogueOutputParams), + ] + + return _ReductionParams, _EpilogueOutputParams + + +########################################################################################### +# Epilogue Visitor Type Factory +########################################################################################### + +class Empty(ctypes.Structure): + _fields_ = [] + + def __init__(self, *arg) -> None: + pass + +class EmptyByte(ctypes.Structure): + _fields_ = [ + ("byte", ctypes.c_byte) + ] + + def __init__(self, *arg) -> None: + pass + +class EBO: + def __init__(self, index: int, type) -> None: + self.index = index + self.type = type + + def __eq__(self, other) -> bool: + if isinstance(other, EBO): + return self.index == other.index and self.type == other.type + return False + + def __hash__(self) -> int: + return hash((self.index, self.type)) + + def __ne__(self, other): + return not self.__eq__(other) + + def __str__(self) -> str: + return f"<{self.index}, {self.type}>" + + +def tuple_factory_(input_tuple, dtype, constants=[0,1]): + """ + The factory function generating cute::Tuple with input tuple + :param input_tuple: the input tuple + :type input_tuple: tuple + :param dtype: the data type for non-constant values + :type dtype: str, "int32_t", "int", "int64_t" + :param constant: the values that will be 
treated as constants + :type constant: list[int] + + :return: ctype structure representing the cute::Tuple + :return: the empty base classes of the tuple + """ + + # The empty base classes of the current tuple + empty_bases = [] + # The first non empty base class + first_non_empty_base = None + # The ctype fields of the current tuple + ctype_fields = [] + + for idx, entry in enumerate(input_tuple): + # For nested tuples + if isinstance(entry, tuple): + sub_tuple_ctype, sub_empty_bases = tuple_factory_(entry, dtype, constants) + if ctypes.sizeof(sub_tuple_ctype) == 0: + # The empty tuple base class is also an empty EBO + empty_bases.append(EBO(idx, entry)) + else: + if first_non_empty_base is None: + first_non_empty_base = sub_empty_bases + ctype_fields.append((f"entry_{idx}", sub_tuple_ctype)) + else: + if entry in constants: + empty_bases.append(EBO(idx, entry)) + ctype_fields.append((f"entry_{idx}", Empty)) + else: + ctype_fields.append((f"entry_{idx}", dtype)) + if first_non_empty_base is None: + first_non_empty_base = [] + + # Determine whether or not add an additional byte for empty base classes + additional_byte = False + # Special case for constant tuple + if first_non_empty_base is None: + additional_byte = False + else: + for base in first_non_empty_base: + if base in empty_bases: + additional_byte = True + break + + if additional_byte: + ctype_fields = [("empty_byte", EmptyByte), ] + ctype_fields + + # Create the ctype tuple + class TupleType(ctypes.Structure): + _fields_ = ctype_fields + + def __init__(self, args) -> None: + if additional_byte: + fields = self._fields_[1:] + else: + fields = self._fields_ + + assert len(fields) == len(args) + for field, arg in zip(fields, args): + name = field[0] + field_type = field[1] + setattr(self, name, field_type(arg)) + + return TupleType, empty_bases + +def tuple_factory(input_tuple, dtype: str, constants=[0,1]): + """ + The factory function generating cute::Tuple with input tuple + :param input_tuple: the input tuple + :type input_tuple: tuple + :param dtype: the data type for non-constant values + :type dtype: str, "int32_t", "int", "int64_t" + :param constant: the values that will be treated as constants + :type constant: list[int] + + :return: ctype structure representing the cute::Tuple + :return: the empty base classes of the tuple + """ + # Step 1: convert the dtype + if dtype == "int64_t": + dtype = ctypes.c_longlong + elif dtype in ["int", "int32_t"]: + dtype = ctypes.c_int32 + else: + raise NotImplementedError(f"Type {dtype} is not supported") + + tuple_type, _ = tuple_factory_(input_tuple, dtype, constants) + + if ctypes.sizeof(tuple_type) == 0: + return EmptyByte + return tuple_type + + +def visitor_factory(node_types, node_names): + """ + Creates the argument type of epilogue visitor type + + :param node_types: list of argument types under ctypes + :param node_names: list of argument names under str + + :return: tuple type in ctypes.Structure + """ + ctypes_field = [] + # Struct is used when number of nodes < 4 + # Because the Sm90VisitorImplBase has specification up to 4 nodes + # in `include/cutlass/epilogue/fusion/sm90_visitor_tma_warpspecialized.hpp` + if len(node_types) <= 4: + for idx, node_type in enumerate(node_types): + if ctypes.sizeof(node_type) == 0: + # Special case for empty struct + # 1 byte placeholder is used for correct alignment + ctypes_field.append((node_names[idx], ctypes.c_byte)) + else: + ctypes_field.append((node_names[idx], node_type)) + + class VisitorType(ctypes.Structure): + _fields_ = 
ctypes_field + + def __init__(self, kwargs) -> None: + for field in self._fields_: + fname, ftype = field + if ftype != ctypes.c_byte: + setattr(self, fname, ftype(kwargs)) + + # For cases with more than 4 nodes, tuple is used + else: + for idx, node_type in enumerate(node_types): + ctypes_field.append((node_names[idx], node_type)) + + class VisitorType(ctypes.Structure): + _fields_ = ctypes_field + + def __init__(self, kwargs) -> None: + for field in self._fields_: + fname, ftype = field + setattr(self, fname, ftype(kwargs)) + + return VisitorType diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/compiler.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/compiler.py new file mode 100644 index 0000000000000000000000000000000000000000..f03cd2be6fb0d1d2b0da8c8838a7ab1b01db8440 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/compiler.py @@ -0,0 +1,461 @@ +################################################################################################# +# +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +################################################################################################# + +import ctypes +import json +import os +import sqlite3 +import subprocess +import tempfile + +from cuda import cuda, nvrtc + +from cutlass import CACHE_FILE, CUDA_INSTALL_PATH, CUTLASS_PATH, logger +from cutlass.backend.gemm_operation import GemmOperationUniversal +from cutlass.backend.library import ApiVersion +from cutlass.backend.utils.device import device_cc +from cutlass.backend.utils.software import SubstituteTemplate + +IncludeTemplate = r"""#include "${include}" +""" + + +def compile_with_nvcc(cmd, source, error_file): + succeed = True + try: + subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) + except subprocess.CalledProcessError as e: + error_message = e.output.decode() + with open(error_file, "w") as error_out: + error_log = "Compilation error for the following kernel: \n" + error_log += source + error_log += "\nError Message:\n" + error_log += error_message + error_out.write(error_log) + succeed = False + if not succeed: + # Print the error log to stdout if log level is set to warning or higher + # verbosity. Otherwise, simply point to the error log file. + logger.warning(error_log) + raise Exception(f"Invalid Kernel. See '{error_file}' for details.") + + +class CompilationOptions: + """ + Compilation options. + """ + + def __init__(self, flags, arch, include_paths=[]): + self.includes = [] + self.include_paths = include_paths + self.flags = flags + self.arch = arch + + def get_str(self): + options = "" + + for flag in self.flags: + options += " " + flag + + for incl in self.include_paths: + options += " --include-path=%s" % incl + + arch_flag = " -arch=sm_%d" % self.arch + if self.arch == 90: + arch_flag += "a" + options += arch_flag + + return options + + def get(self): + options = [] + + for flag in self.flags: + options.append(bytes(str.encode(flag))) + + for incl in self.include_paths: + options.append(bytes(str.encode("--include-path=%s" % incl))) + + arch_flag = " -arch=sm_%d" % self.arch + if self.arch == 90: + arch_flag += "a" + + options.append(bytes(str.encode(arch_flag))) + + return options + + +def convertToBinaryData(filename): + with open(filename, "rb") as file: + blobData = file.read() + return blobData + + +def CDLLBin(host_binary): + tempfile.tempdir = "./" + temp_so = tempfile.NamedTemporaryFile(prefix="host_func", suffix=".so", delete=True) + with open(temp_so.name, "wb") as file: + file.write(host_binary) + host_lib = ctypes.CDLL(temp_so.name) + return host_lib + + +class ArtifactManager: + """ + Artifact manager + """ + + def __init__(self) -> None: + connection = sqlite3.connect(CACHE_FILE) + cursor = connection.cursor() + # Create the table if it does not already exist + sqlite_create_table_query = """ + CREATE TABLE IF NOT EXISTS compiled_operations(op_key TEXT NOT NULL UNIQUE, + cubin BLOB NOT NULL, + hostbin BLOB NOT NULL, + op_name TEXT NOT NULL, + op_attrs TEXT NOT NULL) + """ + cursor.execute(sqlite_create_table_query) + connection.commit() + cursor.close() + + self._nvrtc_compile_options = ["-std=c++17", "-default-device"] + self._nvcc_compile_options = [ + "-std=c++17", + "--expt-relaxed-constexpr", + "-Xcudafe --diag_suppress=esa_on_defaulted_function_ignored", + ] + self.nvcc() + self.compiled_cache_device = {} + self.compiled_cache_host = {} + + def nvrtc(self): + self.backend = "nvrtc" + self.default_compile_options = self._nvrtc_compile_options + + def nvcc(self): + self.backend = "nvcc" + self.default_compile_options = 
self._nvcc_compile_options + + def insert_operation(self, op_key, cubin, hostfile, op_name, op_attrs): + connection = sqlite3.connect(CACHE_FILE) + cursor = connection.cursor() + sqlite_insert_blob_query = """ INSERT OR IGNORE INTO compiled_operations (op_key, cubin, hostbin, op_name, op_attrs) VALUES (?, ?, ?, ?, ?)""" + + hostbin = convertToBinaryData(hostfile) + + data_tuple = (op_key, cubin, hostbin, op_name, json.dumps(op_attrs)) + + cursor.execute(sqlite_insert_blob_query, data_tuple) + connection.commit() + cursor.close() + + def load_operation(self, op_key, extra_funcs): + connection = sqlite3.connect(CACHE_FILE) + cursor = connection.cursor() + sqlite_fetch_blob_query = """SELECT * from compiled_operations where op_key = ?""" + cursor.execute(sqlite_fetch_blob_query, (op_key,)) + record = cursor.fetchall() + if len(record) == 0: + return False + for row in record: + key, cubin_image, host_binary, operation_name, op_attr = row + op_attr = json.loads(op_attr) + err, module = cuda.cuModuleLoadData(cubin_image) + if err != cuda.CUresult.CUDA_SUCCESS: + raise RuntimeError("Cuda Error: {}".format(err)) + + err, kernel = cuda.cuModuleGetFunction(module, bytes(str.encode(operation_name))) + self.compiled_cache_device[key] = kernel + + compiled_host_fns = {} + host_lib = CDLLBin(host_binary) + + func_name = operation_name + "_get_params" + func = getattr(host_lib, func_name) + func.restype = ctypes.POINTER(ctypes.c_char * op_attr[0]) + compiled_host_fns["get_args"] = func + + func_name = operation_name + "_shared_memory_size" + func = getattr(host_lib, func_name) + compiled_host_fns["shared_memory_capacity"] = func() + + for attr in op_attr: + if isinstance(attr, str): + func_name = operation_name + "_" + attr + func = getattr(host_lib, func_name) + + # Set the return type of the function + if attr in extra_funcs and extra_funcs[attr] != None: + func.restype = extra_funcs[attr] + + compiled_host_fns[attr] = func + + self.compiled_cache_host[key] = compiled_host_fns + return True + + def emit_compile_(self, operation_list, compilation_options, host_compilation_options): + """ + Compile a list of kernels and store them into database + """ + source_buffer_device = "" + source_buffer_host = "" + # 1. include + includes = [] + for operation in operation_list: + for incl in operation.emitter.includes: + if incl not in includes: + includes.append(incl) + + includes_host = ["builtin_types.h", "device_launch_parameters.h", "stddef.h"] + includes + for incl in includes: + source_buffer_device += SubstituteTemplate( + IncludeTemplate, + {"include": incl}, + ) + + for incl in includes_host: + source_buffer_host += SubstituteTemplate( + IncludeTemplate, + {"include": incl}, + ) + + # 2. Operations + for operation in operation_list: + source_buffer_device += operation.emit() + source_buffer_host += operation.emit() + values = { + "operation_name": operation.name(), + "operation_suffix": operation.emitter.operation_suffix, + } + source_buffer_device += SubstituteTemplate( + operation.KernelTemplate, + values, + ) + source_buffer_host += SubstituteTemplate(operation.HostTemplate, values) + + if self.backend == "nvrtc": + # 3. 
compile + err, program = nvrtc.nvrtcCreateProgram( + str.encode(source_buffer_device), + bytes(str.encode("module.cu")), + 0, [], []) + + if err != nvrtc.nvrtcResult.NVRTC_SUCCESS: + raise RuntimeError("NVRTC Error: {}".format(err)) + + # Compile program + options = compilation_options.get() + + err, = nvrtc.nvrtcCompileProgram(program, len(options), options) + if err != nvrtc.nvrtcResult.NVRTC_SUCCESS: + error_string = "NVRTC Error: {}\n".format(err) + + # Get log from compilation + err, logSize = nvrtc.nvrtcGetProgramLogSize(program) + if err != nvrtc.nvrtcResult.NVRTC_SUCCESS: + raise RuntimeError("NVRTC Error: {}".format(err)) + + log = b" " * logSize + err, = nvrtc.nvrtcGetProgramLog(program, log) + if err != nvrtc.nvrtcResult.NVRTC_SUCCESS: + raise RuntimeError("NVRTC Error: {}".format(err)) + + raise RuntimeError(error_string + log.decode() + source_buffer_device) + + # Get data from compilation + err, dataSize = nvrtc.nvrtcGetCUBINSize(program) + if err != nvrtc.nvrtcResult.NVRTC_SUCCESS: + raise RuntimeError("NVRTC Error: {}".format(err)) + + cubin_image = b" " * dataSize + (err,) = nvrtc.nvrtcGetCUBIN(program, cubin_image) + if err != nvrtc.nvrtcResult.NVRTC_SUCCESS: + raise RuntimeError("NVRTC Error: {}".format(err)) + + else: # with nvcc backend + # emit code + tempfile.tempdir = "./" + temp_cu = tempfile.NamedTemporaryFile( + prefix="kernel", suffix=".cu", delete=True) + temp_cubin = tempfile.NamedTemporaryFile( + prefix="kernel", suffix=".cubin", delete=True) + with open(temp_cu.name, "w") as file: + file.write(source_buffer_device) + + # compile with nvcc + cmd_template = "${cuda_install_path}/bin/nvcc ${options} -cubin ${srcfile} -o ${tarfile}" + values = { + "cuda_install_path": CUDA_INSTALL_PATH, + "options": compilation_options.get_str(), + "srcfile": temp_cu.name, + "tarfile": temp_cubin.name, + } + cmd = SubstituteTemplate(cmd_template, values) + compile_with_nvcc(cmd, source_buffer_device, "./cutlass_python_compilation_device_error.txt") + + # load the cubin image + with open(temp_cubin.name, "rb") as file: + cubin_image = file.read() + + # Set up the host-side library code + cmd_template = ( + "echo '%s'|${cuda_install_path}/bin/nvcc -x cu -Xcompiler=\"-fpermissive -w -fPIC\" ${options}" + % source_buffer_host + ) + cmd = SubstituteTemplate( + cmd_template, + { + "cuda_install_path": CUDA_INSTALL_PATH, + "options": host_compilation_options.get_str(), + }, + ) + + tempfile.tempdir = "./" + temp = tempfile.NamedTemporaryFile( + prefix="host_func", suffix=".so", delete=True) + + cmd += " - -shared -o %s -lcudart -lcuda" % temp.name + compile_with_nvcc(cmd, source_buffer_host, error_file="./cutlass_python_compilation_host_error.txt") + host_lib = ctypes.CDLL(temp.name) + + return cubin_image, host_lib, temp + + def add_module(self, operations, compile_options=None, bypass_cache=False): + """ + Insert a new compiled device module + """ + include_paths = [ + CUDA_INSTALL_PATH + "/include", + CUTLASS_PATH + "/include", + CUTLASS_PATH + "/tools/util/include", + CUTLASS_PATH + "/python/cutlass/cpp/include", + ] + + if device_cc() is not None: + arch = device_cc() + else: + # Find the maximum arch tag among the provided operations and compile for that target. + # Since we are compiling to .cubin files, only one architecture may be specified. 
+ arch = max([op.arch for op in operations]) + host_compile_options = CompilationOptions( + self._nvcc_compile_options, arch, include_paths) + if compile_options is None: + compile_options = CompilationOptions( + self.default_compile_options, arch, include_paths) + # save the cubin + operation_key = [] + operation_list = [] + for operation in operations: + # step 1: get kernel string as key + key = operation.rt_module.emit() + operation.procedural_name() + self.backend + # step 1: check if the operation is in cache + compiled_kernel = self.compiled_cache_device.get(key) + + if compiled_kernel is None and not bypass_cache: + hit = self.load_operation(key, getattr( operation.rt_module, "extra_funcs", {})) + if hit: + compiled_kernel = self.compiled_cache_device.get(key) + assert compiled_kernel is not None + if compiled_kernel is not None: + operation.rt_module.kernel = compiled_kernel + compiled_host_fns = self.compiled_cache_host.get(key) + assert compiled_host_fns is not None + for key in compiled_host_fns.keys(): + setattr(operation.rt_module, key, compiled_host_fns[key]) + operation.rt_module.initialize() + else: + operation_list.append(operation.rt_module) + operation_key.append(key) + + if len(operation_list) > 0: + cubin_image, host_lib, host_file = self.emit_compile_( + operation_list, compile_options, host_compile_options) + + err, module = cuda.cuModuleLoadData(cubin_image) + if err != cuda.CUresult.CUDA_SUCCESS: + raise RuntimeError("Cuda Error: {}".format(err)) + + operation_name = [] + operation_attr = [] + for operation, key in zip(operation_list, operation_key): + # get device kernels + err, operation.kernel = cuda.cuModuleGetFunction( + module, + bytes(str.encode(operation.name())) + ) + operation_name.append(operation.name()) + self.compiled_cache_device[key] = operation.kernel + # get host functions + compiled_host_fns = {} + op_attr = [] + + # get param size + func_name = operation.name() + "_get_param_size" + func = getattr(host_lib, func_name) + param_size = func() + + func_name = operation.name() + "_get_params" + func = getattr(host_lib, func_name) + func.argtype = operation.argtype + func.restype = ctypes.POINTER(ctypes.c_char * param_size) + setattr(operation, "get_args", func) + compiled_host_fns["get_args"] = func + + # set shared memory size + func_name = operation.name() + "_shared_memory_size" + func = getattr(host_lib, func_name) + setattr(operation, "shared_memory_capacity", func()) + compiled_host_fns["shared_memory_capacity"] = func() + # set the maximum dynamic shared size + operation.initialize() + + # get extra functions + op_attr.append(param_size) + + if hasattr(operation, "extra_funcs"): + for suffix, ret_type in operation.extra_funcs.items(): + func_name = operation.name() + "_" + suffix + func = getattr(host_lib, func_name) + if ret_type is not None: + func.restype = ret_type + setattr(operation, suffix, func) + compiled_host_fns[suffix] = func + op_attr.append(suffix) + + operation_attr.append(op_attr) + self.compiled_cache_host[key] = compiled_host_fns + + for (key, operation_name, operation_attr,) in zip(operation_key, operation_name, operation_attr): + self.insert_operation( + key, cubin_image, host_file.name, operation_name, operation_attr) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/conv2d_operation.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/conv2d_operation.py new file mode 100644 index 
0000000000000000000000000000000000000000..466c71b491b9009c0b61c40e287599fbcd72aa7f --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/conv2d_operation.py @@ -0,0 +1,698 @@ +################################################################################ +# +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################ + +import ctypes +from typing import Union + +from cuda import cuda +import numpy as np + +from cutlass import ( + ConvKindNames, + ConvKindTag, + DataTypeNames, + DataTypeSize, + DataTypeTag, + IteratorAlgorithmNames, + IteratorAlgorithmTag, + LayoutTag, + LayoutType, + MathOperation, + MathOperationTag, + OpcodeClass, + OpcodeClassNames, + OpcodeClassTag, + OperationKind, + ShortDataTypeNames, + ShortLayoutTypeNames, + SplitKMode, + StrideSupport, + StrideSupportTag, + SwizzlingFunctor, + SwizzlingFunctorTag, + get_complex_from_real, +) + +from cutlass.backend.arguments import ArgumentBase +from cutlass.backend.c_types import dim3_, get_conv2d_arguments +from cutlass.backend.library import ( + EmissionType, + TensorDescription, + TileDescription, +) +from cutlass.backend.memory_manager import device_mem_alloc +from cutlass.backend.operation import ExecutableOperation, LaunchConfiguration +from cutlass.backend.utils.datatypes import to_device_ptr +from cutlass.backend.utils.software import CheckPackages, SubstituteTemplate +from cutlass.shape import GemmCoord + +if CheckPackages().check_torch(): + import torch + + +class Conv2dArguments(ArgumentBase): + """ + Argument wrapper for Conv2d. It encodes problem information and + user-provide tensors into the kernel's argument. 
+ + :param operation: the Conv2d operation to take the argument + :type operation: :class:`cutlass.backend.Conv2dOperation` + :param problem_size: the Conv2d problem size + :type problem_size: :class:`cutlass.shape.Conv2dProblemSize` + :param A: tensor A + :type A: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray + :param B: tensor B + :type B: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray + :param C: tensor C + :type C: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray + :param D: tensor D + :type D: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray + :param split_k_mode: conv2d split K mode, defaults to cutlass_library.library.SplitKMode.Serial + :type split_k_mode: cutlass_library.library.SplitKMode, optional + :param output_op: output operator, optional + :type output_op: :class:`cutlass.backend.LinearCombinationFunctorArguments` + """ + + def __init__(self, operation, problem_size, A, B, C, D, + split_k_mode=SplitKMode.Serial, **kwargs, ) -> None: + self.operation = operation + self.conv_kind = operation.conv_kind + self.layout_A = operation.A.layout + self.layout_B = operation.B.layout + self.layout_C = operation.C.layout + + self.element_A = operation.A.element + self.element_B = operation.B.element + self.element_C = operation.C.element + + if self.layout_C == LayoutType.TensorNC32HW32: + raise Exception("Layout type TensorNC32HW32 is not currently supported") + + super().__init__(A, B, C, D, **kwargs) + + if "split_k_slices" in kwargs.keys() and kwargs["split_k_slices"] > 1: + self.split_k_mode = split_k_mode + self.split_k_slices = kwargs["split_k_slices"] + else: + self.split_k_mode = SplitKMode.Serial + self.split_k_slices = 1 + + if "output_op" in kwargs.keys() and self.split_k_mode != SplitKMode.Parallel: + self.output_op = kwargs["output_op"] + else: + self.output_op = self.operation.epilogue_type(1.0, 0.0) + + self.problem_size = problem_size + self.problem_size.split_k_slices = self.split_k_slices + + self.initialize() + + def get_arguments(self): + tc_numel = -1 + if hasattr(self, "tensor_c_numel"): + tc_numel = self.tensor_c_numel + + self.c_arguments = self.operation.argument_type( + int(self.conv_kind), + self.problem_size.ctype, + int(to_device_ptr(self.ptr_A)), + int(to_device_ptr(self.ptr_B)), + int(to_device_ptr(self.ptr_C)), + int(to_device_ptr(self.ptr_D)), + tc_numel, + self.output_op, + int(self.split_k_mode) + ) + + def initialize(self): + self.launch_config = self.operation.rt_module.plan(self) + + self.get_arguments() + + # Allocate and initialize device workspace + device_workspace_size = self.operation.rt_module.get_workspace_size(self.c_arguments) + if device_workspace_size > 0: + self.workspace_buffer = device_mem_alloc(device_workspace_size) + workspace_ptr = self.workspace_buffer.ptr + err, = cuda.cuMemsetD32( + workspace_ptr, 0, device_workspace_size // 4) + else: + workspace_ptr = None + + self.semaphore = 0 + if workspace_ptr is not None and self.split_k_mode == SplitKMode.Parallel: + self.ptr_D = workspace_ptr + # Reset arguments now that ptr_D has been updated + self.get_arguments() + elif workspace_ptr is not None and self.split_k_mode == SplitKMode.Serial: + self.semaphore = workspace_ptr + + params_ = self.operation.rt_module.get_args( + self.c_arguments, ctypes.c_void_p(int(self.semaphore))) + self.host_workspace = bytearray(params_.contents) + self.device_workspace = None + + def sync(self): + """ + Synchronize the arguments. 
If the input tensor is on the host, + copy it from device to host. + """ + return super().sync() + + +class Conv2dRT(ExecutableOperation): + """ + Conv2dRT manages the CUTLASS runtime components + """ + + KernelTemplate = r""" +extern "C" +__global__ void +${operation_name}(${operation_name}${operation_suffix}::Params params) { + + // Dynamic shared memory base pointer + extern __shared__ int SharedStorageBase[]; + + // Declare pointer to dynamic shared memory. + ${operation_name}${operation_suffix}::SharedStorage *shared_storage = + reinterpret_cast<${operation_name}${operation_suffix}::SharedStorage *>(SharedStorageBase); + + ${operation_name}${operation_suffix} op; + + op(params, *shared_storage); +} + """ + + HostTemplate = r""" +extern "C" { + // Get the size of params in bytes + int ${operation_name}_get_param_size(){ + return sizeof(${operation_name}${operation_suffix}::Params); + } + + // Get the size of dynamic shared memory in bytes + int ${operation_name}_shared_memory_size() { + return int(sizeof(${operation_name}${operation_suffix}::SharedStorage)); + } + + using ElementA = typename ${operation_name}_base::ElementA; + using ElementB = typename ${operation_name}_base::ElementB; + using ElementC = typename ${operation_name}_base::ElementC; + using LayoutA = typename ${operation_name}_base::LayoutA; + using LayoutB = typename ${operation_name}_base::LayoutB; + using LayoutC = typename ${operation_name}_base::LayoutC; + using EpilogueOutputOp = typename ${operation_name}_base::EpilogueOutputOp; + + struct ${operation_name}_TemporaryArgs { + int conv_kind; + cutlass::conv::Conv2dProblemSize problem_size; + ElementA* ptr_A; + ElementB* ptr_B; + ElementC* ptr_C; + ElementC* ptr_D; + int tensor_c_numel; + typename EpilogueOutputOp::Params epilogue_params; + int split_k_mode; + }; + + typename ${operation_name}${operation_suffix}::Arguments + construct_arguments(${operation_name}_TemporaryArgs args) { + cutlass::conv::Operator conv_operator = static_cast<cutlass::conv::Operator>(args.conv_kind); + auto tc_A = cutlass::conv::implicit_gemm_tensor_a_extent(conv_operator, args.problem_size); + auto tc_B = cutlass::conv::implicit_gemm_tensor_b_extent(conv_operator, args.problem_size); + auto tc_C = cutlass::conv::implicit_gemm_tensor_c_extent(conv_operator, args.problem_size); + auto tc_D = cutlass::conv::implicit_gemm_tensor_c_extent(conv_operator, args.problem_size); + + auto size_C = tc_C.at(0) * tc_C.at(1) * tc_C.at(2) * tc_C.at(3); + if (args.tensor_c_numel >= 0 && args.tensor_c_numel == tc_C.at(3) && args.tensor_c_numel < size_C) { + // C is interpreted as bias + tc_C = {0, 0, 0, 0}; + } + + cutlass::TensorRef<ElementA, LayoutA> tref_A(args.ptr_A, LayoutA::packed(tc_A)); + cutlass::TensorRef<ElementB, LayoutB> tref_B(args.ptr_B, LayoutB::packed(tc_B)); + cutlass::TensorRef<ElementC, LayoutC> tref_C(args.ptr_C, LayoutC::packed(tc_C)); + cutlass::TensorRef<ElementC, LayoutC> tref_D(args.ptr_D, LayoutC::packed(tc_D)); + + return { + args.problem_size, + tref_A, + tref_B, + tref_C, + tref_D, + args.epilogue_params, + static_cast<cutlass::conv::SplitKMode>(args.split_k_mode) + }; + } + + // Get the params as byte array + char* ${operation_name}_get_params(${operation_name}_TemporaryArgs args, int *semaphore=nullptr) { + auto arguments = construct_arguments(args); + typename ${operation_name}${operation_suffix}::Params* params; + params = new ${operation_name}${operation_suffix}::Params(arguments, semaphore); + + char *bytes = ((char*)(params)); + char *output = new char[sizeof(${operation_name}${operation_suffix}::Params)]; + for (unsigned int i = 0; i < sizeof(${operation_name}${operation_suffix}::Params); i ++) +
output[i] = bytes[i]; + + return output; + } + + dim3 ${operation_name}_get_grid_shape( + int conv_kind, + cutlass::conv::Conv2dProblemSize problem_size, + cutlass::gemm::GemmCoord tile_size, + int split_k_slices + ) { + + using Swizzle = typename ${operation_name}_base::ThreadblockSwizzle; + auto tiled_shape = Swizzle::get_tiled_shape( + static_cast<cutlass::conv::Operator>(conv_kind), + problem_size, + tile_size, + split_k_slices); + + return Swizzle::get_grid_shape(tiled_shape); + } + + size_t ${operation_name}_get_workspace_size(${operation_name}_TemporaryArgs args) { + auto arguments = construct_arguments(args); + + // Temporarily define device::-level Conv2d so that we can call get_workspace_size + using DeviceConv = cutlass::conv::device::ImplicitGemmConvolution<${operation_name}_base>; + return DeviceConv::get_workspace_size(arguments); + } +} + + """ + + def __init__(self, operation: "Conv2dOperation"): + super().__init__(operation) + self.extra_funcs = { + "get_grid_shape": dim3_, + "get_workspace_size": ctypes.c_uint64 + } + self.argument_type, self.epilogue_type = get_conv2d_arguments(operation.epilogue_functor) + self.argtype = [ctypes.POINTER(self.argument_type), ctypes.c_void_p] + self.conv_kind = operation.conv_kind + + self.operation: Conv2dOperation = operation + + self.emitter = EmitConv2dInstance("_type") + + self.threads = operation.tile_description.num_threads + + self.swizzle_functor = operation.swizzling_functor + + def emit(self): + return self.emitter.emit(self.operation) + + def plan(self, arguments: Conv2dArguments): + tile_size = GemmCoord( + self.operation.tile_description.threadblock_shape[0], + self.operation.tile_description.threadblock_shape[1], + self.operation.tile_description.threadblock_shape[2], + ) + + grid = self.get_grid_shape( + int(self.conv_kind), + arguments.problem_size.ctype, + tile_size.ctype, + arguments.split_k_slices + ) + + return LaunchConfiguration( + [grid.x, grid.y, grid.z], [self.threads, 1, 1], + self.shared_memory_capacity) + + def initialize(self): + err, = cuda.cuFuncSetAttribute( + self.kernel, + attrib=cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, + value=self.shared_memory_capacity) + if err != cuda.CUresult.CUDA_SUCCESS: + raise RuntimeError(f"CUDA Error: {err}") + + +class Conv2dOperation: + """ + CUTLASS Conv2d operation description.
+ + :param conv_kind: convolution operator + :type conv_kind: :class:`cutlass_library.library.ConvKind` + + :param iterator_algorithm: Selects among several implementation + variants trading off performance with simplicity + :type iterator_algorithm: :class:`cutlass_library.library.IteratorAlgorithm` + + :param arch: GPU compute capability (sm_xx) + :type arch: int + + :param tile_description: tile description + :type tile_description: :class:`cutlass.backend.TileDescription` + + :param A: tensor A description + :type A: :class:`cutlass.backend.TensorDescription` + + :param B: tensor B description + :type B: :class:`cutlass.backend.TensorDescription` + + :param C: tensor C description + :type C: :class:`cutlass.backend.TensorDescription` + + :param D: tensor D description + :type D: :class:`cutlass.backend.TensorDescription` + + :param element_epilogue: element type for computation in epilogue \ + :type element_epilogue: cutlass_library.library.DataType + + :param stride_support: distinguish among partial specializations that \ + accelerate certain problems where convolution stride is unit \ + :type stride_support: :class:`cutlass_library.library.StrideSupport` + + :param epilogue_functor: convolution epilogue functor + :type epilogue_functor: :class:`EpilogueFunctor` + + :param swizzling_functor: threadblock swizzling functor + """ + def __init__( + self, + conv_kind, + iterator_algorithm, + arch: int, + tile_description: TileDescription, + A: TensorDescription, + B: TensorDescription, + C: TensorDescription, + stride_support, + epilogue_functor, + swizzling_functor=SwizzlingFunctor.Identity1, + emission_type=EmissionType.Kernel, + **kwargs + ): + self.operation_kind: OperationKind = OperationKind.Conv2d + self.arch: int = arch + self.tile_description: TileDescription = tile_description + self.conv_kind = conv_kind + self.A: TensorDescription = A + self.B: TensorDescription = B + self.C: TensorDescription = C + self.epilogue_functor = epilogue_functor + self.iterator_algorithm = iterator_algorithm + self.stride_support = stride_support + self.swizzling_functor = swizzling_functor + + self.emission_type = emission_type + + self.rt_module: Conv2dRT = Conv2dRT(self) + self.argument_type = self.rt_module.argument_type + self.epilogue_type = self.rt_module.epilogue_type + + def run(self, arguments: Conv2dArguments) -> cuda.CUresult: + """ + Launch the cuda kernel with input arguments + + :param arguments: conv2d arguments + :type arguments: :class:`cutlass.backend.Conv2dArguments` + """ + + # launch the kernel + err = self.rt_module.run( + arguments.host_workspace, + arguments.device_workspace, + arguments.launch_config, + ) + + if err != cuda.CUresult.CUDA_SUCCESS: + raise RuntimeError(f"CUDA Error {err}") + + return err + + # + # Get function name + # + + def procedural_name(self): + """The full procedural name indicates architecture, extended name, tile size, and layout.""" + return self.configuration_name() + + def configuration_name(self): + """The full procedural name indicates architecture, extended name, tile size, and layout.""" + + opcode_class_name = OpcodeClassNames[ + self.tile_description.math_instruction.opcode_class + ] + + threadblock = "%dx%d_%dx%d" % ( + self.tile_description.threadblock_shape[0], + self.tile_description.threadblock_shape[1], + self.tile_description.threadblock_shape[2], + self.tile_description.stages, + ) + + if self.stride_support == StrideSupport.Unity: + configuration_name = 
"cutlass_sm${arch}_${opcode_class}_${extended_name}_${threadblock}_${layout}_unity_stride_align${alignment}" + else: + configuration_name = "cutlass_sm${arch}_${opcode_class}_${extended_name}_${threadblock}_${layout}_align${alignment}" + + return SubstituteTemplate( + configuration_name, + { + "arch": str(self.arch), + "opcode_class": opcode_class_name, + "extended_name": self.extended_name(), + "threadblock": threadblock, + "layout": self.layout_name(), + "alignment": "%d" % self.A.alignment + }, + ) + + def extended_name(self): + """Append data types if they differ from compute type.""" + if self.C.element != self.tile_description.math_instruction.element_accumulator and \ + self.A.element != self.tile_description.math_instruction.element_accumulator: + extended_name = "${element_c}_${core_name}_${element_a}" + elif self.C.element == self.tile_description.math_instruction.element_accumulator and \ + self.A.element != self.tile_description.math_instruction.element_accumulator: + extended_name = "${core_name}_${element_a}" + else: + extended_name = "${core_name}" + + extended_name = SubstituteTemplate(extended_name, { + "element_a": DataTypeNames[self.A.element], + "element_c": DataTypeNames[self.C.element], + "core_name": self.core_name(), + }) + + return extended_name + + def layout_name(self): + return "%s" % (ShortLayoutTypeNames[self.A.layout]) + + def core_name(self): + """The basic operation kind is prefixed with a letter indicating the accumulation type.""" + + intermediate_type = "" + + if self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp: + inst_shape = "%dx%dx%d" % tuple( + self.tile_description.math_instruction.instruction_shape) + if self.tile_description.math_instruction.element_a != self.A.element and \ + self.tile_description.math_instruction.element_a != self.accumulator_type(): + intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a] + else: + inst_shape = "" + + return "%s%s%s%s_%s" % ( + ShortDataTypeNames[self.accumulator_type()], + inst_shape, + intermediate_type, + ConvKindNames[self.conv_kind], + IteratorAlgorithmNames[self.iterator_algorithm] + ) + + def is_complex(self): + complex_operators = [ + MathOperation.multiply_add_complex, + MathOperation.multiply_add_complex_gaussian, + ] + return self.tile_description.math_instruction.math_operation in complex_operators + + def accumulator_type(self): + accum = self.tile_description.math_instruction.element_accumulator + + if self.is_complex(): + return get_complex_from_real(accum) + + return accum + + def device_op(self): + """ + Returns a new Conv2dOperation object that is constructed with emission type + ``EmissionType.Device``. 
+ + :return: operation ready for device-level code emission + :rtype: Conv2dOperation + """ + return Conv2dOperation( + self.conv_kind, self.iterator_algorithm, self.arch, self.tile_description, + self.A, self.B, self.C, self.stride_support, self.epilogue_functor, self.swizzling_functor, + emission_type=EmissionType.Device) + + +################################################################################################### +# +# Emits single instances of a CUTLASS device-wide operator +# +################################################################################################### + + +class EmitConv2dInstance: + def __init__(self, operation_suffix=""): + self.operation_suffix = operation_suffix + self.includes = [ + "cutlass/cutlass.h", + "cutlass/conv/kernel/default_conv2d_fprop.h", + "cutlass/conv/kernel/default_conv2d_dgrad.h", + "cutlass/conv/kernel/default_conv2d_wgrad.h", + "cutlass/conv/device/implicit_gemm_convolution.h" + ] + self.template = """ +// Conv2d${conv_kind_name} ${iterator_algorithm_name} kernel instance "${operation_name}" +using ${operation_name}_base = +typename cutlass::conv::kernel::DefaultConv2d${conv_kind_name}< + ${element_a}, + ${layout_a}, + ${element_b}, + ${layout_b}, + ${element_c}, + ${layout_c}, + ${element_accumulator}, + ${opcode_class}, + ${arch}, + cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, + cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k} >, + cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, + ${epilogue_functor}, + ${swizzling_functor}, + ${stages}, + ${math_operator}, + ${iterator_algorithm}, + ${stride_support}, + ${align_a}, + ${align_b} +>::Kernel; + +struct ${operation_name}${operation_suffix}: + public ${operation_name}_base { }; + +""" + + self.template_device = """ +// Conv2d operation ${operation_name} + +using Conv2d${conv_kind_name}Kernel = typename cutlass::conv::kernel::DefaultConv2d${conv_kind_name}< + ${element_a}, + ${layout_a}, + ${element_b}, + ${layout_b}, + ${element_c}, + ${layout_c}, + ${element_accumulator}, + ${opcode_class}, + ${arch}, + cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, + cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k} >, + cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, + ${epilogue_functor}, + ${swizzling_functor}, + ${stages}, + ${math_operator}, + ${iterator_algorithm}, + ${stride_support}, + ${align_a}, + ${align_b} +>::Kernel; + +using DeviceKernel = + typename cutlass::conv::device::ImplicitGemmConvolution<Conv2d${conv_kind_name}Kernel>; +""" + + def emit(self, operation): + warp_shape = [int(operation.tile_description.threadblock_shape[idx] / + operation.tile_description.warp_count[idx]) for idx in range(3)] + + epilogue_vector_length = int(min( + operation.C.alignment * DataTypeSize[operation.C.element], 128) / DataTypeSize[operation.C.element]) + + values = { + "operation_name": operation.procedural_name(), + "operation_suffix": self.operation_suffix, + "conv_kind": ConvKindTag[operation.conv_kind], + "conv_kind_name": ConvKindNames[operation.conv_kind].capitalize(), + "element_a": DataTypeTag[operation.A.element], + "layout_a": LayoutTag[operation.A.layout], + "element_b": DataTypeTag[operation.B.element], + "layout_b": LayoutTag[operation.B.layout], + "element_c": DataTypeTag[operation.C.element], + "layout_c": LayoutTag[operation.C.layout], +
"element_accumulator": DataTypeTag[operation.accumulator_type()], + "opcode_class": OpcodeClassTag[operation.tile_description.math_instruction.opcode_class], + "arch": "cutlass::arch::Sm%d" % operation.arch, + "threadblock_shape_m": str(operation.tile_description.threadblock_shape[0]), + "threadblock_shape_n": str(operation.tile_description.threadblock_shape[1]), + "threadblock_shape_k": str(operation.tile_description.threadblock_shape[2]), + "warp_shape_m": str(warp_shape[0]), + "warp_shape_n": str(warp_shape[1]), + "warp_shape_k": str(warp_shape[2]), + "instruction_shape_m": str(operation.tile_description.math_instruction.instruction_shape[0]), + "instruction_shape_n": str(operation.tile_description.math_instruction.instruction_shape[1]), + "instruction_shape_k": str(operation.tile_description.math_instruction.instruction_shape[2]), + "epilogue_vector_length": str(epilogue_vector_length), + "epilogue_functor": operation.epilogue_functor.emit(), + "swizzling_functor": SwizzlingFunctorTag[operation.swizzling_functor], + "stages": str(operation.tile_description.stages), + "iterator_algorithm": IteratorAlgorithmTag[operation.iterator_algorithm], + "iterator_algorithm_name": IteratorAlgorithmNames[operation.iterator_algorithm].capitalize(), + "stride_support": StrideSupportTag[operation.stride_support], + "math_operator": "cutlass::arch::OpMultiplyAddComplex" if operation.is_complex() else MathOperationTag[operation.tile_description.math_instruction.math_operation], + "align_a": str(operation.A.alignment), + "align_b": str(operation.B.alignment), + } + + if operation.emission_type == EmissionType.Kernel: + conv2d_template = self.template + else: + conv2d_template = self.template_device + + return SubstituteTemplate(conv2d_template, values) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/epilogue.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/epilogue.py new file mode 100644 index 0000000000000000000000000000000000000000..df87f6c9c29c83b429bffc85486a472019c3d721 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/epilogue.py @@ -0,0 +1,514 @@ +################################################################################ +# +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################ + +import ctypes + +import numpy as np +from scipy.special import erf + +from cutlass import DataType, DataTypeTag +from cutlass.backend.c_types import MatrixCoord_ +from cutlass.backend.frontend import NumpyFrontend +from cutlass.backend.library import ActivationOp, ActivationOpTag +from cutlass.backend.utils.software import CheckPackages, SubstituteTemplate + +dtype2ctype = { + DataType.f16: ctypes.c_uint16, + DataType.f32: ctypes.c_float, + DataType.f64: ctypes.c_double, + DataType.s8: ctypes.c_int8, + DataType.s32: ctypes.c_int32 +} + +torch_available = CheckPackages().check_torch() +if torch_available: + import torch + import torch.nn.functional as F + + +def get_scalar(value): + """ + Returns a scalar value from a container (e.g., np.ndarray) + """ + if isinstance(value, np.ndarray): + if value.size != 1: + raise Exception("Scalars used in epilogue must be of size 1") + return value.reshape(-1)[0] + elif CheckPackages().check_torch() and isinstance(value, torch.Tensor): + if value.size != 1: + raise Exception("Scalars used in epilogue must be of size 1") + return value.reshape(-1)[0] + else: + return value + + +def to_ctype_value(value, dtype): + """ + Converts ``value`` to the corresponding storage needed for the ctype that + will store ``value``. + """ + scalar = get_scalar(value) + if dtype == DataType.f16: + # Convert f16 value into an integer + return int.from_bytes(np.float16(scalar).tobytes(), "little") + else: + return scalar + + +################################################################################################# +# +# Epilogue Functors +# +################################################################################################# + + +class EpilogueFunctorBase: + """ + Base class for thread-level epilogue functors + """ + + def __init__(self) -> None: + pass + + def emit(self, tag, template_argument): + template = """${tag}<${arguments}>""" + arguments = "" + for idx, arg in enumerate(template_argument): + arguments += arg + if idx < len(template_argument) - 1: + arguments += ", " + values = { + "tag": tag, + "arguments": arguments, + } + + return SubstituteTemplate(template, values) + + +class LinearCombination(EpilogueFunctorBase): + """ + Apply a linear combination operator to an array of elements + D = alpha * accumulator + beta * source + + :param element_output: data type used to load and store tensors + + :param epilogue_vector_length: number of elements computed per operation. 
+ Usually it is 128/sizeof_bits, but we use 64 and 32 sometimes + when there are not enough data to store + + :param element_accumulator: Accumulator data type + + :param element_epilogue: data type used to compute linear combination + """ + + tag = "cutlass::epilogue::thread::LinearCombination" + + def __init__( + self, element_output, epilogue_vector_length, + element_accumulator=None, element_epilogue=None) -> None: + super().__init__() + + if element_accumulator is None: + element_accumulator = element_output + if element_epilogue is None: + element_epilogue = element_output + + self.element_output = element_output + self.element_accumulator = element_accumulator + self.element_epilogue = element_epilogue + self.epilogue_vector_length = epilogue_vector_length + + self.template_arguments = [ + DataTypeTag[element_output], + str(epilogue_vector_length), + DataTypeTag[element_accumulator], + DataTypeTag[element_epilogue], + ] + + c_element_epilogue = dtype2ctype[self.element_epilogue] + element_epilogue = self.element_epilogue + + class _EpilogueOutputOpParams(ctypes.Structure): + _fields_ = [ + ("alpha", c_element_epilogue), + ("beta", c_element_epilogue), + ("alpha_ptr", ctypes.c_void_p), + ("beta_ptr", ctypes.c_void_p) + ] + + def __init__(self, alpha, beta, *args) -> None: + self.alpha = to_ctype_value(alpha, element_epilogue) + self.beta = to_ctype_value(beta, element_epilogue) + + self.epilogue_type = _EpilogueOutputOpParams + + def emit(self): + return super().emit(self.tag, self.template_arguments) + + +class LinearCombinationClamp(LinearCombination): + """ + Applies a linear combination operator to an array of elements then clamps + the output before converting to the output element type. + + D = alpha * accumulator + beta * source + uniform + + :param element_output: data type used to load and store tensors + + :param epilogue_vector_length: number of elements computed per operation. + Usually it is 128/sizeof_bits, but we use 64 and 32 sometimes + when there are not enough data to store + + :param element_accumulator: Accumulator data type + + :param element_epilogue: data type used to compute linear combination + """ + + tag = "cutlass::epilogue::thread::LinearCombinationClamp" + + def __init__( + self, element_output, epilogue_vector_length, + element_accumulator=None, element_epilogue=None) -> None: + # Base constructor + super().__init__( + element_output, + epilogue_vector_length, + element_accumulator, + element_epilogue, + ) + + c_element_epilogue = dtype2ctype[self.element_epilogue] + element_epilogue = self.element_epilogue + + class _EpilogueOutputOpParams(ctypes.Structure): + _fields_ = [ + ("alpha", c_element_epilogue), + ("beta", c_element_epilogue), + ("alpha_ptr", ctypes.c_void_p), + ("beta_ptr", ctypes.c_void_p), + ] + + def __init__(self, alpha, beta, *args) -> None: + self.alpha = to_ctype_value(alpha, element_epilogue) + self.beta = to_ctype_value(beta, element_epilogue) + + self.epilogue_type = _EpilogueOutputOpParams + + +class FastLinearCombinationClamp(EpilogueFunctorBase): + """ + Applies a linear combination operator to an array of elements then clamps + the output before converting to the output element type. + + D = alpha * accumulator + beta * source + + Note: The below method only when problem_size_K <= 256 for signed int8 gemm + or problem_size_K <= 128 for unsigned int8 gemm. The default approach is + above. 
+ + :param element_output: data type used to load and store tensors + + :param epilogue_vector_length: number of elements computed per operation. + Usually it is 128/sizeof_bits, but we use 64 and 32 sometimes + when there are not enough data to store + """ + + tag = "cutlass::epilogue::thread::FastLinearCombinationClamp" + + def __init__(self, element_output, epilogue_vector_length, *args) -> None: + super().__init__() + + self.template_arguments = [ + DataTypeTag[element_output], str(epilogue_vector_length) + ] + + self.element_accumulator = DataType.s32 + self.element_epilogue = DataType.f32 + + # get epilogue output op + c_element_epilogue = dtype2ctype[self.element_epilogue] + element_epilogue = self.element_epilogue + + class _EpilogueOutputOpParams(ctypes.Structure): + _fields_ = [ + ("alpha", c_element_epilogue), + ("beta", c_element_epilogue), + ("alpha_ptr", ctypes.c_void_p), + ("beta_ptr", ctypes.c_void_p), + ] + + def __init__(self, alpha, beta, *args) -> None: + self.alpha = to_ctype_value(alpha, element_epilogue) + self.beta = to_ctype_value(beta, element_epilogue) + + self.epilogue_type = _EpilogueOutputOpParams + + def emit(self): + return super().emit(self.tag, self.template_arguments) + + +class LinearCombinationGeneric(LinearCombination): + """ + Applies a linear combination operator followed by an activation function + to an array of elements. + + D = activation(alpha * accumulator + beta * source) + + :param activation_functor: input activation functor + + :param element_output: data type used to load and store tensors + + :param epilogue_vector_length: number of elements computed per operation. + Usually it is 128/sizeof_bits, but we use 64 and 32 sometimes + when there are not enough data to store + + :param element_accumulator: Accumulator data type + + :param element_epilogue: data type used to compute linear combination + """ + + tag = "cutlass::epilogue::thread::LinearCombinationGeneric" + + def __init__( + self, activation_functor, + element_output, epilogue_vector_length, + element_accumulator=None, element_epilogue=None) -> None: + super().__init__( + element_output, + epilogue_vector_length, + element_accumulator, + element_epilogue, + ) + + self.template_arguments = [ + activation_functor.emit()] + self.template_arguments + + self.activation_functor = activation_functor + self.element_epilogue = element_epilogue + + # get epilogue output op + self.epilogue_type = self.activation_functor.epilogue_output_op(self.element_epilogue) + + +class ActivationFunctor: + """ + Base class for frequently used activation functions + """ + + @staticmethod + def numpy(x: np.ndarray): + raise NotImplementedError() + + @classmethod + def emit(cls): + return ActivationOpTag[cls.binding_type] + + @staticmethod + def epilogue_output_op(element_epilogue): + c_element_epilogue = dtype2ctype[element_epilogue] + + class _EpilogueOutputOpParams(ctypes.Structure): + _fields_ = [ + ("alpha", c_element_epilogue), + ("beta", c_element_epilogue), + ("alpha_ptr", ctypes.c_void_p), + ("beta_ptr", ctypes.c_void_p), + ] + + def __init__(self, alpha, beta, *args) -> None: + self.alpha = to_ctype_value(alpha, element_epilogue) + self.beta = to_ctype_value(beta, element_epilogue) + + return _EpilogueOutputOpParams + +class ActivationMeta(type): + @classmethod + def __call__(cls, x, *args): + if isinstance(x, np.ndarray): + return cls.numpy(x, *args) + elif torch_available and isinstance(x, torch.Tensor): + return cls.torch(x, *args) + else: + raise NotImplementedError("Unsupported tensor type") + 
+ @classmethod + def numpy(cls, *args): + raise NotImplementedError(f"Numpy reference for {cls.__name__[:-4]} is not implemented.") + + @classmethod + def torch(cls, *args): + raise NotImplementedError(f"PyTorch reference for {cls.__name__[:-4]} is not implemented.") + +############################################################################## +# identity operator +class identityMeta(ActivationMeta): + @classmethod + def numpy(cls, x): + return x + + @classmethod + def torch(cls, x): + return x + +class identity(ActivationFunctor, metaclass=identityMeta): + binding_type = ActivationOp.Identity + + +############################################################################## +# ReLu operator +class reluMeta(ActivationMeta): + @classmethod + def numpy(cls, x): + return np.where(x > 0, x, 0) + + @classmethod + def torch(cls, x): + return F.relu(x) + +class relu(ActivationFunctor, metaclass=reluMeta): + binding_type = ActivationOp.ReLU + + +############################################################################## +# Leaky ReLu operator +class leakyReLUMeta(ActivationMeta): + @classmethod + def numpy(cls, x, leaky_alpha): + return np.maximum(x, 0) + np.minimum(x, 0) * leaky_alpha + + @classmethod + def torch(cls, x, leaky_alpha): + return F.leaky_relu(x, leaky_alpha) + +class leaky_relu(ActivationFunctor, metaclass=leakyReLUMeta): + binding_type = ActivationOp.LeakyReLU + + @staticmethod + def epilogue_output_op(element_epilogue): + c_element_epilogue = dtype2ctype[element_epilogue] + + class _EpilogueOutputOpParams(ctypes.Structure): + _fields_ = [ + ("alpha", c_element_epilogue), + ("beta", c_element_epilogue), + ("alpha_ptr", ctypes.c_void_p), + ("beta_ptr", ctypes.c_void_p), + ("leaky_alpha", c_element_epilogue) + ] + + def __init__(self, alpha, beta, leaky_alpha=0.2, *args) -> None: + self.alpha = to_ctype_value(alpha, element_epilogue) + self.beta = to_ctype_value(beta, element_epilogue) + self.alpha_ptr = 0 + self.beta_ptr = 0 + self.leaky_alpha = to_ctype_value(leaky_alpha, element_epilogue) + + return _EpilogueOutputOpParams + + +############################################################################## +# Tanh operator +class tanhMeta(ActivationMeta): + @classmethod + def numpy(cls, x): + return np.tanh(x) + + @classmethod + def torch(cls, x): + return torch.tanh(x) + +class tanh(ActivationFunctor, metaclass=tanhMeta): + binding_type = ActivationOp.Tanh + + +############################################################################## +# Sigmoid operator +class sigmoidMeta(ActivationMeta): + @classmethod + def numpy(cls, x): + return 1.0 / (1.0 + np.exp(-x)) + + @classmethod + def torch(cls, x): + return F.sigmoid(x) + +class sigmoid(ActivationFunctor, metaclass=sigmoidMeta): + binding_type = ActivationOp.Sigmoid + + +############################################################################## +# SiLu operator +class siluMeta(ActivationMeta): + @classmethod + def numpy(cls, x): + return x * sigmoidMeta.numpy(x) + + @classmethod + def torch(cls, x): + return F.silu(x) + + +class silu(ActivationFunctor, metaclass=siluMeta): + binding_type = ActivationOp.SiLU + + +############################################################################## +# Hardswish operator +class hardswishMeta(ActivationMeta): + @classmethod + def numpy(cls, x): + relu6 = np.minimum(np.maximum(x + 3.0, 0), 6.0) + return x * relu6 / 6.0 + + @classmethod + def torch(cls, x): + return F.hardswish(x) + + +class hardswish(ActivationFunctor, metaclass=hardswishMeta): + binding_type = 
ActivationOp.HardSwish + + +############################################################################## +# GELU operator +class geluMeta(ActivationMeta): + @classmethod + def numpy(cls, x): + return 0.5 * x * (1 + erf(x / np.sqrt(2.0))) + + @classmethod + def torch(cls, x): + return F.gelu(x) + + +class gelu(ActivationFunctor, metaclass=geluMeta): + binding_type = ActivationOp.Gelu diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/__init__.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6c82b71ad06a3793a5f4dd3d89a391a2bc671000 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/__init__.py @@ -0,0 +1,34 @@ +################################################################################ +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +################################################################################ + +from cutlass.backend.evt.epilogue import EpilogueFunctorVisitor +from cutlass.backend.evt.frontend import PythonASTFrontend diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa47f617320d02adf309599d1d96653f236a127e Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/__pycache__/epilogue.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/__pycache__/epilogue.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..691879afc21fff836b6f228416143e213099a243 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/__pycache__/epilogue.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/backend/__init__.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/backend/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6d424dd80cdda50f1e0ce297e3652787af4b1d15 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/backend/__init__.py @@ -0,0 +1,36 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +################################################################################################# + +from cutlass.backend.evt.backend.sm80_emitter import Sm80Emitter +import cutlass.backend.evt.backend.sm80_nodes as sm80_nodes +from cutlass.backend.evt.backend.sm90_emitter import Sm90Emitter +import cutlass.backend.evt.backend.sm90_nodes as sm90_nodes diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/backend/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/backend/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52221628e4a255aab9fcceeebe834e689031c98d Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/backend/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/backend/__pycache__/emitter_base.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/backend/__pycache__/emitter_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..02a9449fce3da4f620f5dfc03c0f5d55cedd75d9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/backend/__pycache__/emitter_base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/backend/__pycache__/sm80_emitter.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/backend/__pycache__/sm80_emitter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..80c78abc60613197f6a3fa5cb69a9dd90d6ff13e Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/backend/__pycache__/sm80_emitter.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/backend/__pycache__/sm80_nodes.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/backend/__pycache__/sm80_nodes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1abfeba79999878ce209ab052f900409903aef4e Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/backend/__pycache__/sm80_nodes.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/backend/__pycache__/sm90_emitter.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/backend/__pycache__/sm90_emitter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..78e1eecf9c88f6c59912fe1844b8fecccf5238f3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/backend/__pycache__/sm90_emitter.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/backend/__pycache__/sm90_nodes.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/backend/__pycache__/sm90_nodes.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..f12d38d104e42c33474dacd0e55c2040ecd66437 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/backend/__pycache__/sm90_nodes.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/backend/emitter_base.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/backend/emitter_base.py new file mode 100644 index 0000000000000000000000000000000000000000..375378c943b5c7b4e79b934728e031140bcdbbc6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/backend/emitter_base.py @@ -0,0 +1,158 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################################# + +""" +Base class for Epilogue Visitor Emitter +""" + +from cutlass import DataTypeTag +from cutlass.backend.evt.ir import TopoVisitorNode, DAGIR + + +class FusionCallbacks: + def __init__(self, dag_ir: DAGIR, cc: int, emit_CD=True) -> None: + """ + Emit the EVT fusion callbacks + :param dag_ir: the DAG IR holding the epilogue visitor + :param cc: compute capability + :param emit_CD: whether to emit nodes C & D as a part of the fusion callbacks + For Sm90, set emit_CD=False, as Tensor C & D are hardcoded in the collective API + so that their shared memory can be explicitly reused + For Sm89, set emit_CD=True as they are treated as normal AuxLoad & AuxStore nodes. 
+ """ + self.dag_ir = dag_ir + self.emit_CD = emit_CD + self.cc = cc + if self.cc < 90: + self.namespace = "threadblock" + else: + self.namespace = "fusion" + + # + # Helper functions + # + + def get_visitor_name(self, node: str): + """ + Get the visitor name + """ + meta = self.dag_ir.get_node_meta(node) + if not isinstance(meta, TopoVisitorNode) and self.dag_ir.in_degree(node) > 0: + return f"EVT{meta.name_camel}" + else: + return meta.name_camel + + def emit(self): + node_metas = self.dag_ir.node_metas_topological_order() + epilogue_str = "" + # Step 1: emit individual node type decl + # emit the EVT & DAG connector + for meta in node_metas: + if not meta.disabled: + epilogue_str += self.emit_node(meta) + if not self.emit_CD and meta.name == "D": + continue + if isinstance(meta, TopoVisitorNode): + epilogue_str += self.emit_dag(meta) + else: + epilogue_str += self.emit_evt(meta) + + # Step 2: post-processing & get callback name + if not self.emit_CD: + if not self.dag_ir.has_node("C"): + epilogue_str += "using ElementC = void;\nusing StrideC = StrideD;\n" + output_node = self.dag_ir.get_all_inputs("D")[0] + # The callback is the src of node D + callback_name = self.get_visitor_name(output_node) + else: + # The callback is the last node in the topological order + callback_name = self.get_visitor_name(node_metas[-1].name) + return epilogue_str, callback_name + + def emit_evt(self, node): + if self.dag_ir.in_degree(node.name) == 0: + return "" + + evt_tmp = f""" +using EVT{node.name_camel} = cutlass::epilogue::{self.namespace}::Sm{self.cc}EVT< + {node.name_camel}, +""" + sorted_children = self.dag_ir.get_all_inputs(node.name) + evt_node_strs = [f" {self.get_visitor_name(child_name)}" for child_name in sorted_children] + evt_tmp += ",\n".join(evt_node_strs) + ">;\n" + + return evt_tmp + + def emit_dag(self, node): + subgraph = node.subgraph + subgraph_nodes = subgraph.nodes_topological_order() + # Emit the Edge Tuple + edge_tuples = "cute::tuple<\n" + for n in subgraph_nodes[:-1]: + in_edges = subgraph.in_edges(n) + edge_weights = [subgraph.get_edge_weight(edge[0], edge[1]) for edge in in_edges] + sorted_children = [edge[0] for _, edge in sorted(zip(edge_weights, in_edges))] + edge_tuple = " cute::seq<" + edge_str = [str(subgraph_nodes.index(child)) for child in sorted_children] + edge_tuple += ", ".join(edge_str) + ">,\n" + + edge_tuples += edge_tuple + edge_tuples += " >" + + # Emit the node list + dag_nodes = "" + dag_node_strs = [] + for n in subgraph_nodes[:-1]: + n_meta = subgraph.get_node_meta(n) + if n_meta.disabled: + dag_node_strs.append(f" {self.get_visitor_name(n)}") + else: + dag_node_strs.append(f" {n_meta.name_camel}") + dag_nodes = ",\n".join(dag_node_strs) + + return f""" +using {node.name_camel} = cutlass::epilogue::{self.namespace}::Sm{self.cc}TopologicalVisitor< + {DataTypeTag[node.subgraph.element_compute]}, + {edge_tuples}, +{dag_nodes} +>; +""" + + def emit_node(self, node): + if isinstance(node, TopoVisitorNode): + emission = "" + for node in node.subgraph.node_metas_topological_order(): + if not node.disabled: + emission += self.emit_node(node) + return emission + else: + return node.underlying_impl.type_decl diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/backend/sm80_emitter.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/backend/sm80_emitter.py new file mode 100644 index 0000000000000000000000000000000000000000..849cb6222df672824de39fde6d4449378df7c5e5 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/backend/sm80_emitter.py @@ -0,0 +1,47 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################################# + +""" +Emitter for Sm80 Epilogue Visitor +""" + +from cutlass.backend.evt.backend.emitter_base import FusionCallbacks +from cutlass.backend import GemmOperationUniversal + + +class Sm80Emitter: + def __init__(self, operation: GemmOperationUniversal, graph) -> None: + self.fusion_callbacks = FusionCallbacks(graph, cc=80) + + def emit(self): + callback_decl, callback_name = self.fusion_callbacks.emit() + return callback_name, callback_decl diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/backend/sm80_nodes.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/backend/sm80_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..0158a905d167d789973c210216063dc8efb7130e --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/backend/sm80_nodes.py @@ -0,0 +1,258 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. 
Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################################# + +from cutlass import DataTypeTag + +from cutlass.backend.evt.ir import ( + # Load Node + AccumulatorImpl, + AuxLoadImpl, + ColumnBroadcastImpl, + LoadNode, + LoadSrcImpl, + RowBroadcastImpl, + ScalarBroadcastImpl, + # Compute Node + ComputeImpl, + # Store Node + AuxStoreImpl, + ColumnReductionImpl, + RowReductionImpl, + ScalarReductionImpl +) + +from cutlass.backend.library import ( + FloatRoundStyleTag, + FunctionalOp, + op_tag, +) + + +class Sm80AccumulatorImpl(AccumulatorImpl): + + @property + def type_decl(self): + """ + Return the string defining the type + """ + if self._type_decl is not None: + return self._type_decl + + self._type_decl = f"""\nusing {self.name_camel} = cutlass::epilogue::threadblock::VisitorAccFetch;\n""" + return self._type_decl + + +class Sm80AuxLoadImpl(AuxLoadImpl): + + @property + def type_decl(self): + """ + Return the string defining the type + """ + if self._type_decl is not None: + return self._type_decl + + self._type_decl = f""" +using {self.name_camel} = cutlass::epilogue::threadblock::VisitorAuxLoad< + OutputTileThreadMap, {DataTypeTag[self.element]}, {self.stride_mnl} +>; +""" + return self._type_decl + + +class Sm80LoadSrcImpl(Sm80AuxLoadImpl): + pass + + +class Sm80ScalarBroadcastImpl(ScalarBroadcastImpl): + def __init__(self, node: LoadNode) -> None: + super().__init__(node) + self.broadcast_count = 1 + self.reduction_fn = FunctionalOp.Multiplies + + @property + def type_decl(self): + """ + Return the string defining the type + """ + if self._type_decl is not None: + return self._type_decl + + self._type_decl = f""" +using {self.name_camel} = cutlass::epilogue::threadblock::VisitorScalarBroadcast< + {DataTypeTag[self.element]}, {self.stride_mnl}, {self.broadcast_count}, {op_tag(self.reduction_fn)} +>; +""" + return self._type_decl + + +class Sm80RowBroadcastImpl(RowBroadcastImpl): + + @property + def type_decl(self): + """ + Return the string defining the type + """ + if self._type_decl is not None: + return self._type_decl + + self._type_decl = f""" +using {self.name_camel} = cutlass::epilogue::threadblock::VisitorRowBroadcast< + OutputTileThreadMap, {DataTypeTag[self.element]}, + {self.stride_mnl} +>; +""" + return self._type_decl + + +class 
Sm80ColumnBroadcastImpl(ColumnBroadcastImpl): + + @property + def type_decl(self): + """ + Return the string defining the type + """ + if self._type_decl is not None: + return self._type_decl + + self._type_decl = f""" +using {self.name_camel} = cutlass::epilogue::threadblock::VisitorColBroadcast< + OutputTileThreadMap, {DataTypeTag[self.element]}, + {self.stride_mnl} +>; +""" + return self._type_decl + + +class Sm80ComputeImpl(ComputeImpl): + + @property + def type_decl(self): + """ + Return the string defining the type + """ + if self._type_decl is not None: + return self._type_decl + + self._type_decl = f""" +using {self.name_camel} = cutlass::epilogue::threadblock::VisitorCompute< + {op_tag(self.fn)}, {DataTypeTag[self.element_output]}, {DataTypeTag[self.element_compute]}, + {FloatRoundStyleTag[self.round_style]} +>; +""" + return self._type_decl + + +class Sm80AuxStoreImpl(AuxStoreImpl): + + @property + def type_decl(self): + """ + Return the string defining the type + """ + if self._type_decl is not None: + return self._type_decl + + self._type_decl = f""" +using {self.name_camel} = cutlass::epilogue::threadblock::VisitorAuxStore< + OutputTileThreadMap, {DataTypeTag[self.element]}, {FloatRoundStyleTag[self.round_style]}, + {self.stride_mnl} +>; +""" + return self._type_decl + + +class Sm80StoreDImpl(Sm80AuxStoreImpl): + pass + + +class Sm80ColumnReductionImpl(ColumnReductionImpl): + + @property + def type_decl(self): + """ + Return the string defining the type + """ + if self._type_decl is not None: + return self._type_decl + + self._type_decl = f""" +using {self.name_camel} = cutlass::epilogue::threadblock::VisitorColReduction< + {op_tag(self.reg_reduce_fn)}, {op_tag(self.gmem_reduce_fn)}, + OutputTileThreadMap, {DataTypeTag[self.element]}, + {DataTypeTag[self.element_compute]}, {FloatRoundStyleTag[self.round_style]}, + {self.stride_mnl} +>; +""" + return self._type_decl + + +class Sm80RowReductionImpl(RowReductionImpl): + + @property + def type_decl(self): + """ + Return the string defining the type + """ + if self._type_decl is not None: + return self._type_decl + + self._type_decl = f""" +using {self.name_camel} = cutlass::epilogue::threadblock::VisitorRowReduction< + {op_tag(self.reg_reduce_fn)}, {op_tag(self.gmem_reduce_fn)}, + OutputTileThreadMap, {DataTypeTag[self.element]}, + {DataTypeTag[self.element_compute]}, {FloatRoundStyleTag[self.round_style]}, + {self.stride_mnl} +>; +""" + return self._type_decl + + +class Sm80ScalarReductionImpl(ScalarReductionImpl): + + @property + def type_decl(self): + """ + Return the string defining the type + """ + if self._type_decl is not None: + return self._type_decl + + self._type_decl = f""" +using {self.name_camel} = cutlass::epilogue::threadblock::VisitorScalarReduction< + {op_tag(self.reg_reduce_fn)}, {op_tag(self.gmem_reduce_fn)}, + OutputTileThreadMap, {DataTypeTag[self.element]}, + {DataTypeTag[self.element_compute]}, {FloatRoundStyleTag[self.round_style]}, + {self.stride_mnl} +>; +""" + return self._type_decl diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/backend/sm90_emitter.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/backend/sm90_emitter.py new file mode 100644 index 0000000000000000000000000000000000000000..2e28cc3fb27a9a005e37a7462b18f803667aa866 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/backend/sm90_emitter.py @@ -0,0 +1,98 @@ 
+################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################################# + +""" +Emitter for Sm90 Epilogue Visitor +""" + +from cutlass import DataTypeTag, EpilogueScheduleTag +from cutlass.backend import GemmOperationUniversal +from cutlass.backend.evt.backend.emitter_base import FusionCallbacks + + +class CollectiveEpilogue: + def __init__(self, tile_description, + schedule, + element_c, + element_d, + fusion_callbacks) -> None: + + self.cta_tile_mnk = tile_description.threadblock_shape + self.element_c = element_c + self.element_d = element_d + self.schedule = schedule + self.fusion_callbacks = fusion_callbacks + + @property + def CtaTileMNK(self) -> str: + """ + The threadblock shape + """ + return f"cute::Shape<_{self.cta_tile_mnk[0]}, _{self.cta_tile_mnk[1]}, _{self.cta_tile_mnk[2]}>" + + @property + def EpilogueTileType(self) -> str: + """ + The epilogue tile type + """ + return "cutlass::epilogue::collective::EpilogueTileAuto" + + @property + def Schedule(self) -> str: + return EpilogueScheduleTag[self.schedule] + + def emit(self): + callback_decl, callback_name = self.fusion_callbacks.emit() + return callback_name, f""" +using EpilogueDescriptor = cutlass::epilogue::collective::detail::EpilogueDescriptor< + {self.CtaTileMNK}, {self.EpilogueTileType}, + {DataTypeTag[self.element_c]}, {DataTypeTag[self.element_d]}, + {self.Schedule} +>; +{callback_decl} +""" + + +class Sm90Emitter: + def __init__(self, operation: GemmOperationUniversal, graph) -> None: + fusion_callbacks = FusionCallbacks(graph, cc=90, emit_CD=False) + + self.collective_epilogue = CollectiveEpilogue( + tile_description=operation.tile_description, + schedule=operation.tile_description.epilogue_schedule, + element_c=operation.C.element, + element_d=operation.C.element, + 
fusion_callbacks=fusion_callbacks + ) + + def emit(self): + return self.collective_epilogue.emit() diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/backend/sm90_nodes.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/backend/sm90_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..3e29a3af1f721168f9b8feff637a50de818107ff --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/backend/sm90_nodes.py @@ -0,0 +1,351 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +################################################################################################# + +from pycute import product + +from cutlass import DataTypeSize, DataTypeTag +from cutlass.backend.evt.ir import ( + # Load Node + AccumulatorImpl, + AuxLoadImpl, + ColumnBroadcastImpl, + LoadNode, + LoadSrcImpl, + RowBroadcastImpl, + ScalarBroadcastImpl, + # Compute Node + ComputeImpl, + ComputeNode, + # Store Node + AuxStoreImpl, + ColumnReductionImpl, + RowReductionImpl, + ScalarReductionImpl, + StoreNode, + StoreDImpl, +) +from cutlass.backend.library import ( + FloatRoundStyleTag, + FunctionalOp, + op_tag, +) + + +class Sm90AccumulatorImpl(AccumulatorImpl): + + @property + def type_decl(self): + """ + Return the string defining the type + """ + if self._type_decl is not None: + return self._type_decl + + self._type_decl = f"""\nusing {self.name_camel} = cutlass::epilogue::fusion::Sm90AccFetch;\n""" + return self._type_decl + + +class Sm90LoadSrcImpl(LoadSrcImpl): + + @property + def type_decl(self): + """ + Return the string defining the type + """ + if self._type_decl is not None: + return self._type_decl + + self._type_decl = f""" +using ElementC = {DataTypeTag[self.element]}; +using StrideC = {self.stride_mnl}; +using {self.name_camel} = cutlass::epilogue::fusion::Sm90SrcFetch; +""" + return self._type_decl + + +class Sm90AuxLoadImpl(AuxLoadImpl): + + @property + def descriptor(self) -> str: + """ + Descriptor for Aux Load + """ + return f"{self.name_camel}Descriptor" + + def decl_descriptor(self) -> str: + """ + Declare the descriptor type + """ + return f"\nusing {self.descriptor} = cutlass::epilogue::collective::detail::AuxLoadDescriptor<EpilogueDescriptor, {self.stride_mnl}, {DataTypeTag[self.element]}>;\n" + + @property + def type_decl(self): + """ + Return the string defining the type + """ + if self._type_decl is not None: + return self._type_decl + + self._type_decl = self.decl_descriptor() + self._type_decl += f""" +using {self.name_camel} = cutlass::epilogue::fusion::Sm90AuxLoad< + {self.descriptor}::Stages, typename {self.descriptor}::EpilogueTile, {DataTypeTag[self.element]}, + {self.stride_mnl}, typename {self.descriptor}::SmemLayoutAtom, typename {self.descriptor}::CopyOpS2R +>; +""" + return self._type_decl + + def get_smem_size(self, cta_tile_mnk, epilogue_tile_mn, stages_c, stages_d, epi_tiles): + """ + Get the shared memory size based on epilogue_tile_mn, stages_c, and stages_d + """ + return (DataTypeSize[self.element] * stages_c * product(epilogue_tile_mn) // 8, 128) + + +class Sm90ScalarBroadcastImpl(ScalarBroadcastImpl): + def __init__(self, node: LoadNode) -> None: + super().__init__(node) + self.broadcast_count = 1 + self.reduction_fn = FunctionalOp.Multiplies + + @property + def type_decl(self): + """ + Return the string defining the type + """ + if self._type_decl is not None: + return self._type_decl + + self._type_decl = f""" +using {self.name_camel} = cutlass::epilogue::fusion::Sm90ScalarBroadcast< + {DataTypeTag[self.element]}, {self.stride_mnl}, {self.broadcast_count}, {op_tag(self.reduction_fn)} +>; +""" + return self._type_decl + + +class Sm90RowBroadcastImpl(RowBroadcastImpl): + + @property + def descriptor(self) -> str: + """ + Descriptor for Aux Load + """ + return f"{self.name_camel}Descriptor" + + def decl_descriptor(self) -> str: + """ + Declare the descriptor type + """ + return f"\nusing {self.descriptor} = cutlass::epilogue::collective::detail::RowBroadcastDescriptor<EpilogueDescriptor, {DataTypeTag[self.element]}>;\n" + + @property + def type_decl(self): + """ + Return the string defining the type + """ + if self._type_decl is not None: + return 
self._type_decl + + self._type_decl = self.decl_descriptor() + self._type_decl += f""" +using {self.name_camel} = cutlass::epilogue::fusion::Sm90RowBroadcast< + {self.descriptor}::Stages, typename EpilogueDescriptor::TileShape, + typename {self.descriptor}::Element, {self.stride_mnl} +>; +""" + return self._type_decl + + def get_smem_size(self, cta_tile_mnk, epilogue_tile_mn, stages_c, stages_d, epi_tiles): + """ + Get the shared memory size based on epilogue_tile_mn, stages_c, and stages_d + """ + stages = (stages_c + epi_tiles - 1) // epi_tiles + 1 + return (DataTypeSize[self.element] * cta_tile_mnk[1] * stages // 8, 16) + + +class Sm90ColumnBroadcastImpl(ColumnBroadcastImpl): + + @property + def type_decl(self): + """ + Return the string defining the type + """ + if self._type_decl is not None: + return self._type_decl + + self._type_decl = f""" +using {self.name_camel} = cutlass::epilogue::fusion::Sm90ColBroadcast< + 0 /*Stages*/, typename EpilogueDescriptor::TileShape, {DataTypeTag[self.element]}, + {self.stride_mnl} +>; +""" + return self._type_decl + + +class Sm90ComputeImpl(ComputeImpl): + + @property + def type_decl(self): + """ + Return the string defining the type + """ + if self._type_decl is not None: + return self._type_decl + + self._type_decl = f""" +using {self.name_camel} = cutlass::epilogue::fusion::Sm90Compute< + {op_tag(self.fn)}, {DataTypeTag[self.element_output]}, {DataTypeTag[self.element_compute]}, + {FloatRoundStyleTag[self.round_style]} +>; +""" + return self._type_decl + + +class Sm90AuxStoreImpl(AuxStoreImpl): + + @property + def descriptor(self) -> str: + """ + Descriptor for Aux Load + """ + return f"{self.name_camel}Descriptor" + + def decl_descriptor(self) -> str: + """ + Declare the descriptor type + """ + return f""" +using {self.descriptor} = cutlass::epilogue::collective::detail::AuxStoreDescriptor< + EpilogueDescriptor, {self.stride_mnl}, {DataTypeTag[self.element]} +>; +""" + @property + def type_decl(self): + """ + Return the string defining the type + """ + if self._type_decl is not None: + return self._type_decl + + self._type_decl = self.decl_descriptor() + self._type_decl += f""" +using {self.name_camel} = cutlass::epilogue::fusion::Sm90AuxStore< + {self.descriptor}::Stages, typename {self.descriptor}::EpilogueTile, {DataTypeTag[self.element]}, + {FloatRoundStyleTag[self.round_style]}, {self.stride_mnl}, typename {self.descriptor}::SmemLayoutAtom, + typename {self.descriptor}::CopyOpR2S +>; +""" + return self._type_decl + + def get_smem_size(self, cta_tile_mnk, epilogue_tile_mn, stages_c, stages_d, epi_tiles): + """ + Get the shared memory size based on epilogue_tile_mn, stages_c, and stages_d + """ + return (DataTypeSize[self.element] * stages_d * product(epilogue_tile_mn) // 8, 128) + + +class Sm90StoreDImpl(StoreDImpl): + + @property + def type_decl(self): + """ + Return the string defining the type + """ + return f""" +using ElementD = {DataTypeTag[self.element]}; +using StrideD = {self.stride_mnl}; +""" + + +class Sm90ColumnReductionImpl(ColumnReductionImpl): + + @property + def type_decl(self): + """ + Return the string defining the type + """ + if self._type_decl is not None: + return self._type_decl + + self._type_decl = f""" +using {self.name_camel} = cutlass::epilogue::fusion::Sm90ColReduction< + {op_tag(self.reg_reduce_fn)}, {op_tag(self.gmem_reduce_fn)}, 0, + typename EpilogueDescriptor::TileShape, {DataTypeTag[self.element]}, + {DataTypeTag[self.element_compute]}, {FloatRoundStyleTag[self.round_style]}, + {self.stride_mnl} +>; +""" + 
return self._type_decl + + +class Sm90RowReductionImpl(RowReductionImpl): + + + @property + def type_decl(self): + """ + Return the string defining the type + """ + if self._type_decl is not None: + return self._type_decl + + self._type_decl = f""" +using {self.name_camel} = cutlass::epilogue::fusion::Sm90RowReduction< + {op_tag(self.reg_reduce_fn)}, {op_tag(self.gmem_reduce_fn)}, 0 /* Stages */, + typename EpilogueDescriptor::TileShape, {DataTypeTag[self.element]}, + {DataTypeTag[self.element_compute]}, {FloatRoundStyleTag[self.round_style]}, + {self.stride_mnl} +>; +""" + return self._type_decl + + +class Sm90ScalarReductionImpl(ScalarReductionImpl): + + + @property + def type_decl(self): + """ + Return the string defining the type + """ + if self._type_decl is not None: + return self._type_decl + + self._type_decl = f""" +using {self.name_camel} = cutlass::epilogue::fusion::Sm90ScalarReduction< + {op_tag(self.reg_reduce_fn)}, {op_tag(self.gmem_reduce_fn)}, + {DataTypeTag[self.element]}, {DataTypeTag[self.element_compute]}, + {FloatRoundStyleTag[self.round_style]}, {self.stride_mnl} +>; +""" + return self._type_decl diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/epilogue.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/epilogue.py new file mode 100644 index 0000000000000000000000000000000000000000..75bc703e9af5d4634356bc0f96162bfe6b13e703 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/epilogue.py @@ -0,0 +1,165 @@ +################################################################################ +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################ + +""" +Epilogue Visitor interface for compiling, and running visitor-based epilogue. 
+""" + +import ctypes + +from cuda import cuda +import numpy as np + +from cutlass import DataType +from cutlass.backend.epilogue import EpilogueFunctorBase +import cutlass.backend.evt.backend +from cutlass.backend.frontend import TensorFrontend + + +class EpilogueFunctorVisitor(EpilogueFunctorBase): + """ + Apply an epilogue functor described by the epilogue EVT + + :param cc: compute capability + :param visitor_frontend: user-provide visitor frontend + + """ + def __init__(self, cc: int, visitor, element_compute=DataType.f32) -> None: + # Type of Emitter based on CC + self.emit_cls = getattr(cutlass.backend.evt.backend, f"Sm{cc}Emitter") + + # Visitor Types + self.visitor = visitor + self.graph = visitor.dag_ir + + # Data types + self.element_epilogue = element_compute # element compute + self.element_output = self.graph.get_node_meta('D').underlying_impl.element + + # Epilogue Thread Type + epilogue_thread_type = self.visitor.epilogue_thread_type + if cc == 90: + self.arg_c_type = self.visitor.arg_c_type + self.arg_d_type = self.visitor.arg_d_type + output_names = self.visitor.return_names + reduction_names = self.visitor.reduction_names + + # Epilogue stages specialized for sm80 kernel + if cc == 80: + if hasattr(self.visitor, "epilogue_stages"): + self.epilogue_stages = self.visitor.epilogue_stages + assert self.epilogue_stages <= 2, "Only supports Stages <=2 in SM80 Epilogue" + + # Epilogue Argument Type + class _Arguments(ctypes.Structure): + """ + Concepts: + class _EpilogueArguments(ctypes.Structure): + _fields_ = [ + ("epilogue", _Arguments), <- this class + ("ptr_C", ctypes.c_void_p), + ("stride_C", StrideBatched_), + ("ptr_D", ctypes.c_void_p), + ("stride_D", StrideBatched_) + ] + """ + _fields_ = [ + ("output_op", epilogue_thread_type) + ] + + def __init__(self, kwargs: dict) -> None: + # The user-input kwargs is a dict of (name: tensors) + # We first convert all of them to device pointers + ptr_kwargs = {} + for key in kwargs.keys(): + is_output = key in output_names and key not in reduction_names + ptr_kwargs[key] = self.get_tensor_ptr(key, kwargs, is_output) + # Initialize the thread arguments + self.output_op = epilogue_thread_type(ptr_kwargs) + + def get_tensor_ptr(self, tensor_name, kwargs, is_output=False): + """ + Helper function for extracting device pointer + """ + # Skip the special tensors + if cc == 90: + if tensor_name in ["C", "D"]: + return 0 + if tensor_name not in kwargs.keys(): + raise ValueError(f"Tensor {tensor_name} is not provided.") + tensor = kwargs[tensor_name] + + # For float scalar constant, directly return the value + if isinstance(tensor, float): + return tensor + + # The tensor frontend returns a device buffer for np.ndarray + # and device ptr for other frontends + buffer_or_ptr = TensorFrontend.argument(tensor, is_output) + if isinstance(tensor, np.ndarray): + # Remember the host tensor for later synchronization + setattr(self, f"{tensor_name}_buffer", buffer_or_ptr) + setattr(self, f"{tensor_name}_host", tensor) + return int(buffer_or_ptr.ptr) + else: + return int(buffer_or_ptr) + + def sync(self): + """ + Synchronize the results from device to host + """ + for name in output_names: + if hasattr(self, f"{name}_host"): + host_tensor = getattr(self, f"{name}_host") + tensor_ptr = getattr(self, f"{name}_buffer").ptr + (err,) = cuda.cuMemcpyDtoH( + host_tensor, + tensor_ptr, + host_tensor.size * host_tensor.itemsize, + ) + if err != cuda.CUresult.CUDA_SUCCESS: + raise RuntimeError("CUDA Error %s" % str(err)) + + self.epilogue_type = _Arguments + + 
def emit(self, operation): + """ + Emit the C++ code + """ + emitter = self.emit_cls(operation, self.graph) + return emitter.emit() + + def get_smem_size(self, tile_description): + """ + Get the shared memory size in bytes + """ + return self.visitor.get_smem_size(tile_description) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/frontend/__init__.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/frontend/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fd50a008453f672cbfbeb08334558c712f5d5eb9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/frontend/__init__.py @@ -0,0 +1,33 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +################################################################################################# + +from cutlass.backend.evt.frontend.python_ast import PythonASTFrontend diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/frontend/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/frontend/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9907af39f06506b046704141e9322890a1edb2f3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/frontend/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/frontend/__pycache__/frontend_base.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/frontend/__pycache__/frontend_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ba4069846556080ed822bff78318a239ad350ff Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/frontend/__pycache__/frontend_base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/frontend/__pycache__/python_ast.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/frontend/__pycache__/python_ast.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7efea55040f0cbb9b97572f6d6654030a3e70e0b Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/frontend/__pycache__/python_ast.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/frontend/frontend_base.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/frontend/frontend_base.py new file mode 100644 index 0000000000000000000000000000000000000000..8d9f6c6e3702dff5212d2636072e3b52de40e10e --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/frontend/frontend_base.py @@ -0,0 +1,262 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################################# + +""" +Base class for Python EVT Frontend +""" + +from typing import Union + +from cutlass import DataType +from cutlass.backend.evt.ir import ( + ComputeNode, + DAGIR, + LayoutNode, + LoadNode, + StoreNode, +) +from cutlass.backend.evt.passes import ( + EVTGraphDrawer, + EVTPassManager, + GetSmemSize, + PassDAG2Tree, + PassGetArgumentType, + PassGetImpl, + PassFixElementD, + PassLayoutManipulateElimination, + PassPreprocessRed, + PassShapeTypePropagation, +) +from cutlass.backend.utils import device_cc +from cutlass.epilogue.evt_ops import permute, reshape +from cutlass.utils.datatypes import library_type + + +class EVTFrontendBase: + layout_fns = { + "permute": permute, + "reshape": reshape + } + + def __init__(self, element_compute=DataType.f32, cc=None, additional_passes=[], **kwargs) -> None: + self.cc = cc if cc else device_cc() + self.element_compute = library_type(element_compute) + self.dag_ir = DAGIR(self.element_compute, self.cc) + self.compute_cnt = 0 + self.layout_cnt = 0 + + self.pass_manager = EVTPassManager( + self.dag_ir, + [ + PassPreprocessRed, + PassGetArgumentType, + PassShapeTypePropagation, + PassLayoutManipulateElimination, + PassGetImpl, + PassDAG2Tree, + PassFixElementD + ] + additional_passes) + + if self.cc == 80: + self._epilogue_stages = 1 + else: + self._epilogue_stages = None + + @property + def epilogue_stages(self): + return self._epilogue_stages + + @epilogue_stages.setter + def epilogue_stages(self, stages): + self._epilogue_stages = stages + + + def parse(self, *args, **kwargs): + raise NotImplementedError(f"The 'parse' function must be overloaded in frontend class") + + def trace(self, *args, **kwargs): + # Parse the input + self.parse(*args, **kwargs) + + # Run the passes + self.pass_manager() + # Set the epilogue type + self.epilogue_thread_type = self.dag_ir.epilogue_thread_type + if self.cc == 90: + self.arg_c_type = self.dag_ir.arg_c_type + self.arg_d_type = self.dag_ir.arg_d_type + self.reduction_names = self.dag_ir.reduction_names + + # + # Helper functions for DAG IR manipulation + # + + def add_node(self, node): + self.dag_ir.add_node(node) + + def add_edge(self, src, tgt, weight=0): + self.dag_ir.add_edge(src, tgt, weight=weight) + + def set_tensor(self, node_name, example): + """ + Add an example tensor to node {node_name} in the DAG IR + """ + meta = self.dag_ir.get_node_meta(node_name) + meta.tensor = {"tensor": example} + + def set_store_tensor(self, node_name, example): + """ + Add an example tensor to node {node_name} in the DAG IR + """ + meta = self.dag_ir.get_node_meta(node_name) + meta.store_tensor = {"tensor": example} + + def mark_output(self, node_name): 
+ """ + Mark a store node as output + """ + meta = self.dag_ir.get_node_meta(node_name) + if not isinstance(meta, StoreNode): + raise ValueError( + f"Only StoreNodes can be marked as output. " + f"Got {type(meta).__name__}: {node_name}") + meta.is_output = True + + # Add node with specific type + + def add_load_node(self, name, example): + """ + Add a Load node to DAG IR + :param name: name of the loaded variable + :type name: str + :param example: example input + :type example: np.ndarray|torch.Tensor|cupy.ndarray|float + """ + if name is None: + raise ValueError(f"Name is not provided.") + if example is None: + raise ValueError(f"Example input for {name} is not provided.") + load_node = LoadNode(name) + load_node.tensor = {"tensor": example} + # Special logics for accumulator + if name == "accum": + if load_node.tensor.rank == 2: + new_shape = tuple([1, ] + list(load_node.tensor.shape)) + load_node.tensor.broadcast(new_shape) + elif load_node.tensor.rank < 2 or load_node.tensor.rank > 3: + raise ValueError(f"Expect example inputs for 'accum' be a rank-2 or rank-3 tensor. Got {load_node.tensor.shape}.") + self.add_node(load_node) + + def add_imm(self, value: Union[float,int]): + """ + Add an immediate scalar value to DAG IR + :param value: the value of the immediate scalar + :type value: float + """ + try: + value = float(value) + except: + raise ValueError(f"{type(value).__name__} cannot be converted to float.") + + name = f"imm_{value}".replace('.', '_') + load_node = LoadNode(name) + load_node.tensor = {"tensor": value, "is_constant": True} + self.add_node(load_node) + return name + + def add_compute_node(self, op, name=None): + """ + Add a compute node. + :param op: the computation op + :param name: the node name (optional) + :type name: str + :return: the name of the compute node + """ + if name is None: + name = f"compute_{self.compute_cnt}" + self.compute_cnt += 1 + compute_node = ComputeNode( + name=name, fn=op, + element_output=self.element_compute, + element_compute=self.element_compute) + self.add_node(compute_node) + return compute_node.name + + def add_layout_node(self, op, kwargs, name=None): + """ + Add a layout node. + :param op: the layout op + :type op: evt_ops + :param name: the node name (optional) + :type name: str + :return: the name of the layout node + """ + if name is None: + name = f"layout_{self.layout_cnt}" + self.layout_cnt += 1 + layout_node = LayoutNode(name=name, fn=op, kwargs=kwargs) + self.add_node(layout_node) + return layout_node.name + + def add_store_node(self, name): + store_node = StoreNode(name) + self.add_node(store_node) + + # + # Visualization The DAG IR + # + + def visualize(self, name="dag_ir"): + """ + Visualize the dag ir with svg file + :param name: the name of the graph + """ + drawer = EVTGraphDrawer(self.dag_ir, name) + if drawer.dot_available: + for name, graph in drawer.get_dot_graph(): + graph.write_svg(f"./{name}.svg") + else: + raise RuntimeError( + "'dot' is not found in path. GraphDrawer is disabled. " + "Please install it with 'sudo apt-get install graphviz'." 
+ ) + + # + # Get shared memory size + # + + def get_smem_size(self, tile_description): + """ + Get the shared memory size of the epilogue + """ + smem_size = GetSmemSize(self.dag_ir)(tile_description) + return smem_size diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/frontend/python_ast.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/frontend/python_ast.py new file mode 100644 index 0000000000000000000000000000000000000000..ac799d80924ea1b2734ab5111e4a79f683001c01 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/frontend/python_ast.py @@ -0,0 +1,184 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +################################################################################################# + +""" +Python AST frontend that parses input into DAG IR +""" + +import ast +import inspect +import textwrap + +import cutlass +from cutlass import DataType +from cutlass.backend.evt.frontend.frontend_base import EVTFrontendBase +from cutlass.backend.epilogue import relu +from cutlass.backend.library import FunctionalOp + + +class PythonASTFrontend(EVTFrontendBase, ast.NodeVisitor): + def __init__(self, element_compute=DataType.f32, **kwargs): + super().__init__(element_compute, **kwargs) + # Flags + # If this state is True, visit_Constant returns values without creating imm node + self.no_imm = False + self.visiting_return = False + + def parse(self, example_inputs): + self.example_inputs = example_inputs + self.source = textwrap.dedent(inspect.getsource(self.__call__)) + self.ast = ast.parse(self.source) + self.visit(self.ast) + + # + # Helper functions + # + @staticmethod + def ast_op_to_bindings(op): + mapping = { + ast.Add: FunctionalOp.Plus, + ast.Sub: FunctionalOp.Minus, + ast.Mult: FunctionalOp.Multiplies, + ast.Div: FunctionalOp.Divides, + "relu": relu.binding_type, + "multiply_add": FunctionalOp.MultiplyAdd, + "sum": (FunctionalOp.Plus, FunctionalOp.AtomicAdd), + "max": (FunctionalOp.Maximum, FunctionalOp.AtomicMaximum) + } + return mapping[op] + + # + # Visiting different node types + # + + def visit_FunctionDef(self, node: ast.FunctionDef): + # Visit args and register load nodes + for arg in node.args.args: + self.visit(arg) + for expr in node.body: + self.visit(expr) + + def visit_arg(self, node: ast.arg): + # Name of the argument + name = node.arg + try: + example_tensor = self.example_inputs[name] + except: + raise RuntimeError(f"Example input for {name} is not provided.") + + self.add_load_node(name, example_tensor) + + def visit_Name(self, node: ast.Name): + return node.id + + def visit_Constant(self, node: ast.Constant): + if self.no_imm: + return node.value + else: + name = self.add_imm(node.value) + return name + + def visit_Tuple(self, node: ast.Tuple): + results = [] + for elt in node.elts: + results.append(self.visit(elt)) + return tuple(results) + + def visit_keyword(self, node: ast.keyword): + return {node.arg: self.visit(node.value)} + + def visit_BinOp(self, node: ast.BinOp): + if self.visiting_return: + raise SyntaxError("Return value cannot be an expression") + lhs = self.visit(node.left) + rhs = self.visit(node.right) + op = self.ast_op_to_bindings(type(node.op)) + name = self.add_compute_node(op) + + # Add edges + # The edge weights are used to sort the input args + self.add_edge(lhs, name, weight=0) + self.add_edge(rhs, name, weight=1) + return name + + def visit_Assign(self, node: ast.BinOp): + target = self.visit(node.targets[0]) + value = self.visit(node.value) + # Create the assign node + self.add_store_node(target) + + # Add edges + self.add_edge(value, target) + return target + + def visit_Call(self, node: ast.Call): + if self.visiting_return: + raise SyntaxError("Return value cannot be an expression") + func = self.visit(node.func) + args = [self.visit(arg) for arg in node.args] + + if func in self.layout_fns.keys(): + # Parse kwargs + # By default, visiting imm automatically creates a load node + # However, in function call, keyword args are used to set + # specific function attributes such as indices for permute + # So no_imm is set to True temporarily + self.no_imm = True + kwargs = {} + for kw in node.keywords: + kwargs.update(self.visit(kw)) + 
self.no_imm = False + op = self.layout_fns[func] + name = self.add_layout_node(op, kwargs) + else: + op = self.ast_op_to_bindings(func) + name = self.add_compute_node(op) + + # Add edges + for idx, arg in enumerate(args): + self.add_edge(arg, name, weight=idx) + return name + + def visit_Return(self, node: ast.Return): + self.visiting_return = True + results = self.visit(node.value) + self.visiting_return = False + self.return_names = results + if not isinstance(results, tuple): + results = (results,) + for rst in results: + try: + example_tensor = self.example_inputs[rst] + except: + raise RuntimeError(f"Example input for {rst} is not provided.") + self.set_store_tensor(rst, example_tensor) + self.mark_output(rst) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/__init__.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9fc3614202b15947601406763ad72f94ec430c7f --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/__init__.py @@ -0,0 +1,53 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +################################################################################################# + +from cutlass.backend.evt.ir.compute_nodes import ComputeNode, ComputeImpl +from cutlass.backend.evt.ir.dag_ir import DAGIR +from cutlass.backend.evt.ir.layout_nodes import LayoutNode +from cutlass.backend.evt.ir.load_nodes import ( + LoadNode, + AccumulatorImpl, + LoadSrcImpl, + AuxLoadImpl, + RowBroadcastImpl, + ColumnBroadcastImpl, + ScalarBroadcastImpl +) +from cutlass.backend.evt.ir.node import TopoVisitorNode, NoOpImpl +from cutlass.backend.evt.ir.store_nodes import ( + StoreNode, + StoreDImpl, + AuxStoreImpl, + ColumnReductionImpl, + RowReductionImpl, + ScalarReductionImpl +) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1aabd3aa8e42ad1a632f57b1d91543230b09ba0e Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/__pycache__/compute_nodes.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/__pycache__/compute_nodes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39bb1eaffb417789955df3e0d5e89506b4fbed96 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/__pycache__/compute_nodes.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/__pycache__/dag_ir.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/__pycache__/dag_ir.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..32f9b9f6f7aac5fad7e63537b8f30481b294c6c3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/__pycache__/dag_ir.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/__pycache__/layout_algorithm.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/__pycache__/layout_algorithm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bff06d3ee0378c2f408137f6e670dc5fe9a00580 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/__pycache__/layout_algorithm.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/__pycache__/layout_nodes.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/__pycache__/layout_nodes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4b72616de39b7a069218d25c5d7bfd3a7fc8b4c1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/__pycache__/layout_nodes.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/__pycache__/load_nodes.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/__pycache__/load_nodes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..97f6476416fd2b4dd7c24c43a6afd9f2203462cc Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/__pycache__/load_nodes.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/__pycache__/node.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/__pycache__/node.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..419038f5597ea91875e42a182c6cbb26c2be4e79 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/__pycache__/node.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/__pycache__/store_nodes.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/__pycache__/store_nodes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b603057300eaa8d603a315f72e0da2d370448ac Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/__pycache__/store_nodes.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/__pycache__/tensor.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/__pycache__/tensor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac35b90737c3a9e52f54d890b8f43b953e3def67 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/__pycache__/tensor.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/compute_nodes.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/compute_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..21592955072f5f15a1e6725fe6de83564e880214 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/compute_nodes.py @@ -0,0 +1,91 @@ +################################################################################ +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################ + +""" +Python registration for compute nodes in EVT +""" + +from cutlass.backend.evt.ir.node import NodeBase, ImplBase +from cutlass.backend.library import FloatRoundStyle + + +class ComputeImplBase(ImplBase): + """ + Base class for compute implementation + """ + def __init__(self, node) -> None: + super().__init__(node) + + +class ComputeImpl(ComputeImplBase): + """ + Implementation for Compute Node + """ + def __init__(self, node) -> None: + super().__init__(node) + + self.fn = node.fn + self.element_output = node.element_output + self.element_compute = node.element_compute + self.round_style = node.round_style + + @staticmethod + def match(node, problem_size: tuple): + return True + + +class ComputeNode(NodeBase): + """ + Compute Node in DAG IR + """ + possible_impls = [ + ComputeImpl + ] + def __init__( + self, name: str, fn, element_output, + element_compute, + round_style=FloatRoundStyle.ToNearest) -> None: + super().__init__(name) + self.op = "compute" + self.fn = fn + self.element_compute = element_compute + self.round_style = round_style + + def type_propagation(self, *args, **kwargs): + """ + Load node loads tensor under type `tensor.element` and returns an array of type `tensor.element`. + """ + self.element = self.element_compute + # In general, the compute nodes have element_output = element_compute + # In certain cases like producer of D it is overwritten by other passes + if not hasattr(self, "element_output"): + self.element_output = self.element diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/dag_ir.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/dag_ir.py new file mode 100644 index 0000000000000000000000000000000000000000..d0ac9402f0bbec9aec7b7b949e90a7dabd84c5e0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/dag_ir.py @@ -0,0 +1,235 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. 
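The ComputeNode added above resolves its data types during type propagation: `element` always tracks `element_compute`, and `element_output` falls back to `element_compute` unless a later pass has already set it. A minimal sketch of that behaviour, assuming the cutlass Python package in this diff imports on its own; the node name "scale" and the f16/f32 types are illustrative only, and FunctionalOp.Multiplies is used purely as an example epilogue function:

    from cutlass import DataType
    from cutlass.backend.library import FunctionalOp, FloatRoundStyle
    from cutlass.backend.evt.ir.compute_nodes import ComputeNode

    # Hypothetical compute node: multiply in f32 (illustrative values)
    node = ComputeNode(
        name="scale",
        fn=FunctionalOp.Multiplies,
        element_output=DataType.f16,
        element_compute=DataType.f32,
        round_style=FloatRoundStyle.ToNearest,
    )

    node.type_propagation()
    # element tracks element_compute; element_output defaults to it when not
    # already set by an earlier pass (e.g. for the producer of D)
    assert node.element == DataType.f32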
+# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################################# + +""" +DAG IR used by Python EVT +""" + +import networkx as nx + +from cutlass import DataType +from cutlass.backend.evt.ir.node import NodeBase +from cutlass.backend.utils import device_cc + + +class DAGIR: + """ + ``DAGIR`` is the main data structure used in the EVT Intermediate Representation. + It consists of a series of ``Node`` s, each representing epilogue visitor nodes. + + In the DAGIR, ``node`` is an string of its name. ``node_meta`` is the underlying class of the node + """ + def __init__(self, element_compute=DataType.f32, cc: int=None) -> None: + # The EVT DAGIR is managed through the nextworkX Digraph class + self._graph = nx.DiGraph() + + self.element_compute = element_compute + + self.reduction_names = [] + + self.cc = cc if cc else device_cc() + + # + # IR manipulator + # + + def add_node(self, meta: NodeBase): + """ + Add a node to dag ir + """ + if self.has_node(meta.name): + raise SyntaxError(f"Variable '{meta.name}' cannot be defined twice.") + self._graph.add_node(meta.name, meta=meta) + + def add_edge(self, src: str, dst: str, weight: int=0): + """ + Add an edge src -> dst to dag ir with weight + """ + if not self.has_node(src): + raise SyntaxError(f"Variable '{src}' is undefined.") + if not self.has_node(dst): + raise SyntaxError(f"Variable '{dst}' is undefined.") + self._graph.add_edge(src, dst, weight=weight) + + def remove_node(self, node: str): + """ + Remove node from dag ir + """ + self._graph.remove_node(node) + + def remove_edge(self, src: str, dst: str): + """ + Remove edge src -> dst + """ + self._graph.remove_edge(src, dst) + + # + # Helper functions for getting attrs + # + + def has_node(self, node: str) -> bool: + """ + Check if the node is in the graph + """ + return self._graph.has_node(node) + + def in_degree(self, node: str): + """ + Get the input degree of node + """ + return self._graph.in_degree(node) + + def in_edges(self, node: str): + """ + Get the input edges of node + """ + return [edge for edge in self._graph.in_edges(node)] + + def out_degree(self, node: str): + """ + Get the output degree of node + """ + return self._graph.out_degree(node) + + def out_edges(self, node: str): + """ + Get the output edges of node + """ + return [edge for edge in self._graph.out_edges(node)] + + def get_node_meta(self, node: str): + """ + Get the meta data of the node + """ + return self._graph.nodes[node]["meta"] + + def get_edge_weight(self, src, dst): + """ + Get the edge weight of 
edge src->dst + """ + return self._graph.get_edge_data(src, dst)["weight"] + + # + # High-level helper functions + # + + def all_reachable_nodes(self, node: str): + """ + Get all the nodes reachable from the current node (exclude) + """ + return list(nx.dfs_preorder_nodes(self._graph, source=node)) + + def get_users(self, node: str): + """ + Get all users of the current node + """ + return [edge[1] for edge in self.out_edges(node)] + + def get_all_inputs(self, node: str): + """ + Get all the input nodes sorted by edge weight + """ + in_edges = self.in_edges(node) + edge_weights = [self.get_edge_weight(*edge) for edge in in_edges] + return [edge[0] for _, edge in sorted(zip(edge_weights, in_edges))] + + def get_all_inputs_meta(self, node: str): + """ + Get all the input node metas sorted by edge weight + """ + return [self.get_node_meta(input_node) for input_node in self.get_all_inputs(node)] + + def replace_all_uses_with(self, node1, node2): + """ + Replace all uses of node1 with node2 + """ + for edge in self.out_edges(node1): + weight = self.get_edge_weight(*edge) + user = edge[1] + self.add_edge(node2, user, weight) + self.remove_edge(node1, user) + self.remove_node(node1) + + # + # Node accessor + # + def nodes_topological_order(self): + """ + Get the nodes in the unique lexicographical topological order + It generates a unique ordering of nodes by first sorting topologically + and then additionally by sorting lexicographically. + + Although topological_sort alone also works, this generates a unique key + for each epilogue visitor pattern and ensures the compilation cache can be reused. + :return: list[str] + """ + return list(nx.lexicographical_topological_sort(self._graph)) + + def node_metas_topological_order(self): + """ + Get the node metas in topological order + :return: list[NodeBase] + """ + return [self.get_node_meta(node) for node in self.nodes_topological_order()] + + @property + def nodes(self): + """ + Get all nodes + :return: list[str] + """ + return list(self._graph.nodes) + + @property + def nodes_meta(self): + """ + Get all node metas + :return: list[NodeBase] + """ + return [data[1]['meta'] for data in self._graph.nodes.data()] + + @property + def edges(self): + """ + Get all edges + :return: list[(str, str)] + """ + return list(self._graph.edges) + + # + # Path + # + def has_path(self, src: str, target: str) -> bool: + """ + Return True is a path exists from src to target + """ + return nx.has_path(self._graph, src, target) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/layout_algorithm.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/layout_algorithm.py new file mode 100644 index 0000000000000000000000000000000000000000..3da35b8d682f7a8dd393a426229cbc453272f71f --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/layout_algorithm.py @@ -0,0 +1,324 @@ +################################################################################ +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. 
Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################ + +""" +Layout algebras +""" + +from pycute import Layout, composition, make_layout, flatten, product + + +def _infer_split(old_shape, new_shape): + old_shape = _tuple_to_list(old_shape) + new_shape = _tuple_to_list(new_shape) + if len(old_shape) == 0 and len(new_shape) == 0: + return [] + if len(old_shape) == 0: + if product(tuple(new_shape)) != 1: + raise ValueError("Invalid reshape size") + else: + return new_shape + if len(new_shape) == 0: + if product(tuple(old_shape)) != 1: + raise ValueError("Invalid reshape size") + else: + return old_shape + # This is done recursively by only process the last dimension at each time + old_dim = old_shape[-1] + new_dim = new_shape[-1] + # Exact match + if old_dim == new_dim: + return _infer_split(old_shape[:-1], new_shape[:-1]) + [new_dim,] + # Needs split + if old_dim > new_dim and old_dim % new_dim == 0: + residual = old_dim // new_dim + return _infer_split(old_shape[:-1] + [residual,], new_shape[:-1]) + [new_dim,] + # Needs merge + if old_dim < new_dim and new_dim % old_dim == 0: + residual = new_dim // old_dim + return _infer_split(old_shape[:-1], new_shape[:-1] + [residual,]) + [old_dim,] + + raise NotImplementedError(f"Unsupported split: {old_shape} -> {new_shape}") + +def _infer_merge(flatten_shape, shape): + flatten_shape = _tuple_to_list(flatten_shape) + shape = _tuple_to_list(shape) + idx_flat = 0 + merged_shape = [] + for dim in shape: + # Exact match + if dim == flatten_shape[idx_flat]: + merged_shape.append(dim) + idx_flat += 1 + # Need group + elif dim > flatten_shape[idx_flat] and dim % flatten_shape[idx_flat] == 0: + residual = dim + group = [] + while(residual > 1): + group.append(flatten_shape[idx_flat]) + residual = residual // flatten_shape[idx_flat] + idx_flat += 1 + merged_shape.append(group) + else: + raise NotImplementedError(f"Unsupported merge: {flatten_shape} -> {shape}") + + return merged_shape + +def _list_to_tuple(nested_list): + if isinstance(nested_list, list) or isinstance(nested_list, tuple): + return tuple(_list_to_tuple(item) for item in nested_list) + return nested_list + +def _tuple_to_list(nested_tuple): + if isinstance(nested_tuple, list) or isinstance(nested_tuple, tuple): + return list(_tuple_to_list(item) for item in nested_tuple) + return 
nested_tuple + +def _reverse_tuple(nested_tuple: tuple): + if isinstance(nested_tuple, tuple): + return tuple([_reverse_tuple(item) for item in nested_tuple][::-1]) + return nested_tuple + +def _get_first_lhs_nonzero_stride(stride_list, idx): + for i in reversed(range(idx)): + if stride_list[i] != 0: + return i + else: + return None + +def _get_first_rhs_nonzero_stride(stride_list, idx): + for i in range(idx+1, len(stride_list)): + if stride_list[i] != 0: + return i + else: + return None + +def reshape(layout, new_shape): + """ + General reshape of input layout. + It takes two steps: + 1. split the dimensions of the old layout + 2. merge the splitted dimensions according to the new shape + """ + # + # Step 1: Split the dimensions of the old layout + # + # 1.1 Flat old and new shape + old_flatten_shape = list(flatten(layout.shape)) + new_flatten_shape = list(flatten(new_shape)) + + # 1.2 Infer the flatten splitted shape + splitted_flatten_shape = _infer_split(old_flatten_shape, new_flatten_shape) + + # 1.3 Unflat the splitted shape based on the old shape + splited_shape = _infer_merge(splitted_flatten_shape, old_flatten_shape) + + # 1.4 Infer the type of each split + # If the split type is in row-major (R), the dimension list is reversed because + # the cute::composition only support column-major split + split_type = [] # the type of each split (ColumnMajor or RowMajor) + permuted_splitted_shape = [] + old_flatten_stride = list(flatten(layout.stride)) + for idx, dim in enumerate(splited_shape): + if not isinstance(dim, list): + permuted_splitted_shape.append(dim) + split_type.append("C") + else: + lhs_stride = _get_first_lhs_nonzero_stride(old_flatten_stride, idx) + rhs_stride = _get_first_rhs_nonzero_stride(old_flatten_stride, idx) + # Special case for single tuple + # Use column-major by default + if lhs_stride is None and rhs_stride is None: + permuted_splitted_shape.append(dim) + split_type.append("C") + else: + if lhs_stride is not None and rhs_stride is not None: + # We consider shape[idx]:stride[idx] + # Case 1: stride[idx - 1] <= stride[idx] <= stride[idx + 1]: column major + if lhs_stride <= old_flatten_stride[idx] and old_flatten_stride[idx] <= rhs_stride: + permuted_splitted_shape.append(dim) + split_type.append("C") + # Case 2: stride[idx - 1] > stride[idx] > stride[idx + 1]: row major + elif lhs_stride > old_flatten_stride[idx] and old_flatten_stride[idx] > rhs_stride: + permuted_splitted_shape.append([d for d in reversed(dim)]) + split_type.append("R") + # Case 3: stride[idx - 1] <= stride[idx] > stride[idx + 1]: concave + elif lhs_stride <= old_flatten_stride[idx] and old_flatten_stride[idx] > rhs_stride: + if lhs_stride >= rhs_stride: + permuted_splitted_shape.append(dim) + split_type.append("C") + else: + permuted_splitted_shape.append([d for d in reversed(dim)]) + split_type.append("R") + # Case 4: stride[idx - 1] > stride[idx] <= stride[idx + 1]: concave + elif lhs_stride > old_flatten_stride[idx] and old_flatten_stride[idx] <= rhs_stride: + if lhs_stride >= rhs_stride: + permuted_splitted_shape.append(dim) + split_type.append("C") + else: + permuted_splitted_shape.append([d for d in reversed(dim)]) + split_type.append("R") + else: + raise NotImplementedError() + elif lhs_stride is None: + # Case 1: dim's stride < dim+1's stride, expand in column major + if old_flatten_stride[idx] > rhs_stride: + permuted_splitted_shape.append([d for d in reversed(dim)]) + split_type.append("R") + else: + permuted_splitted_shape.append(dim) + split_type.append("C") + else: + # Case 1: 
dim's stride > dim-1's stride + if old_flatten_stride[idx] < lhs_stride: + permuted_splitted_shape.append([d for d in reversed(dim)]) + split_type.append("R") + else: + permuted_splitted_shape.append(dim) + split_type.append("C") + + # 1.4 Generate the splitted layout + permuted_splitted_layout = composition(layout, Layout(_list_to_tuple(permuted_splitted_shape))) + + # 1.5 Reverse the permutation in 1.4 before merge + splitted_shape = [] + splitted_stride = [] + for shape_dim, stride_dim, type in zip( + permuted_splitted_layout.shape, + permuted_splitted_layout.stride, + split_type): + if type == "C": + splitted_shape.append(shape_dim) + splitted_stride.append(stride_dim) + else: + splitted_shape.append(tuple([d for d in reversed(shape_dim)])) + splitted_stride.append(tuple([d for d in reversed(stride_dim)])) + splitted_layout = Layout(tuple(splitted_shape), tuple(splitted_stride)) + + + # + # Step 2: Merge the splitted dimensions according to the new shape + # + # 2.1 Merge layout + merged_layout = composition(splitted_layout, Layout(new_shape)) + + # 2.2 Cleaning up + output_layout = composition(merged_layout, Layout(new_shape)) + return output_layout + + +def permutation(layout, permutation): + """ + Permute the layout + """ + new_shape = tuple([layout.shape[idx] for idx in permutation]) + new_stride = tuple([layout.stride[idx] for idx in permutation]) + return Layout(new_shape, new_stride) + + +def _broadcast(layout, new_shape): + if len(layout) == 1 and isinstance(new_shape, int): + old_dim = layout.shape + old_stride = layout.stride + new_dim = new_shape + if old_dim == new_dim: + return Layout(old_dim, old_stride) + elif old_dim == 1: + return Layout(new_dim, 0) + else: + raise NotImplementedError(f"Invalid Broadcast: {old_dim} -> {new_dim}") + + # Align the dimensions + old_shape = layout.shape + if isinstance(old_shape, int): + old_shape = (old_shape,) + sub_layouts = [layout,] + else: + sub_layouts = [sub_layout for sub_layout in layout] + rhs_broadcast_layouts = [Layout(1, 0)] * (len(new_shape) - len(old_shape)) + # Get the broadcasted layout + broadcast_layouts = [] + try: + layout = make_layout(*sub_layouts, *rhs_broadcast_layouts) + broadcast_layouts = [] + for idx, sub_layout in enumerate(layout): + broadcast_layouts.append(_broadcast(sub_layout, new_shape[idx])) + except NotImplementedError: + layout = make_layout(*rhs_broadcast_layouts, *sub_layouts) + for idx, sub_layout in enumerate(layout): + broadcast_layouts.append(_broadcast(sub_layout, new_shape[idx])) + return make_layout(*broadcast_layouts) + + +def broadcast(layout, new_shape): + """ + Broadcast the new layout based on the input shape + The broadcasted shape equals to the new shape + The stride of broadcasted dimensions are 0 + """ + return _broadcast(layout, new_shape) + + +def debroadcast(layout, dims): + """ + Squeeze the 0-stride + """ + for dim in dims: + if layout.stride[dim] != 0: + raise ValueError(f"Dim{dim} cannot be debroadcasted as it has stride {layout.stride[dim]}") + new_shape = tuple([s for idx, s in enumerate(layout.shape) if idx not in dims]) + new_stride = tuple([s for idx, s in enumerate(layout.stride) if idx not in dims]) + return Layout(new_shape, new_stride) + + +def canonicalization_(shapes, strides): + if isinstance(shapes, tuple): + c_shapes = [] + c_strides = [] + for shape, stride in zip(shapes, strides): + c_shape, c_stride = canonicalization_(shape, stride) + c_shapes.append(c_shape) + c_strides.append(c_stride) + return tuple(c_shapes), tuple(c_strides) + else: + if shapes == 1: + 
return 1, 0 + else: + return shapes, strides + +def canonicalization(layout): + """ + Canonicalize the input layout + 1. set the stride of shape "1" to 0 + """ + new_shape, new_stride = canonicalization_(layout.shape, layout.stride) + return Layout(new_shape, new_stride) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/layout_nodes.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/layout_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..4262389897241c64d40b5da97ff9ea8544853501 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/layout_nodes.py @@ -0,0 +1,336 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
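The layout algebra above operates on pycute Layout objects. A rough sketch of the helpers, assuming pycute fills in compact column-major strides when only a shape is given:

    from pycute import Layout
    from cutlass.backend.evt.ir.layout_algorithm import (
        broadcast, canonicalization, permutation,
    )

    l = Layout((4, 1, 8))            # assumed default strides (1, 4, 4)
    l = canonicalization(l)          # size-1 modes get stride 0 -> strides (1, 0, 4)
    lp = permutation(l, [2, 1, 0])   # reorder modes -> shape (8, 1, 4), stride (4, 0, 1)

    # Broadcast a scalar layout to (4, 8); broadcast dimensions get stride 0
    s = broadcast(Layout(1, 0), (4, 8))
    print(lp, s)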
+# +################################################################################################# + +""" +Layout manipulation nodes and implementations + +The layout Nodes change the layout of intermediate nodes in epilogue visitor graph +""" + +from copy import deepcopy + +from pycute import product, flatten + +import cutlass +from cutlass import LayoutType +from cutlass.backend.evt.ir.layout_algorithm import _list_to_tuple, _tuple_to_list +from cutlass.backend.evt.ir.node import NodeBase +from cutlass.backend.evt.ir.tensor import Tensor + + +class PermutationImpl: + """ + Detailed implementation and helper functions for permutation + """ + def __init__(self, node) -> None: + assert "indices" in node.kwargs.keys() + self.indices = list(node.kwargs["indices"]) + self.inverse_indices = self.get_inverse_indices(self.indices) + + def get_inverse_impl(self): + inverse_impl = deepcopy(self) + inverse_impl.indices = self.inverse_indices + inverse_impl.inverse_indices = self.indices + return inverse_impl + + def update(self, shape): + num_dim = len(shape) + indices = self.indices + num_old_dim = len(indices) + # Add offset + for i, idx in enumerate(indices): + indices[i] = idx + num_dim - num_old_dim + # Add broadcast dims + for i in range(num_dim - num_old_dim): + indices = [i,] + indices + + self.indices = indices + self.inverse_indices = self.get_inverse_indices(self.indices) + + def get_inverse_indices(self, indices): + """ + Get the indices for inverse permutation + """ + num_dim = len(indices) + inverse_indices = [0] * num_dim + for i in range(num_dim): + inverse_indices[indices[i]] = i + return inverse_indices + + def shape_propagation(self, input_node_meta): + input_shape = input_node_meta.tensor.shape + output_shape = tuple([input_shape[idx] for idx in self.indices]) + return output_shape + + def broadcast(self, shape, node_meta: NodeBase): + """ + Broadcast the inputs based on current shape + """ + self.update(shape) + inverse_shape = tuple([shape[idx] for idx in self.inverse_indices]) + node_meta.tensor.broadcast(inverse_shape) + + def apply_to_user(self, usr_meta: NodeBase): + """ + Propagate the permutation to the users of the current nodes + """ + usr_meta.tensor.permute(self.inverse_indices) + if hasattr(usr_meta, "store_tensor"): + if usr_meta.store_tensor is not None: + usr_meta.store_tensor.permute(self.inverse_indices) + + def apply_to_input(self, input_meta: NodeBase): + """ + Propagate the permutation to inputs of the current nodes + """ + input_meta.tensor.permute(self.indices) + if hasattr(input_meta, "store_tensor"): + if input_meta.store_tensor is not None: + input_meta.store_tensor.permute(self.indices) + + +class ReshapeImpl: + """ + Detailed implementation and helper functions for reshape + """ + def __init__(self, node) -> None: + self.node = node + assert "new_shape" in node.kwargs.keys() + self.output_shape = _list_to_tuple(node.kwargs["new_shape"]) + + def get_inverse_impl(self): + inverse_impl = deepcopy(self) + inverse_impl.output_shape = self.input_shape + inverse_impl.input_shape = self.output_shape + return inverse_impl + + def shape_propagation(self, input_node_meta): + self.input_shape = input_node_meta.tensor.shape + return _list_to_tuple(self.output_shape) + + def broadcast(self, shape, node_meta: NodeBase): + """ + Broadcast the inputs based on current shape. 
+ """ + # Step 1: infer split + flatten_split_shape = self.infer_split(flatten(self.input_shape), flatten(self.output_shape)) + split_input_shape = self.infer_merge(flatten_split_shape, self.input_shape) + split_output_shape = self.infer_merge(flatten_split_shape, self.output_shape) + + # broadcast shape -> split_output_shape -> flatten_split_shape + if len(shape) - len(split_output_shape) > 0: + for _ in range(len(shape) - len(split_output_shape)): + split_output_shape = [1,] + split_output_shape + flatten_split_shape = [1,] + flatten_split_shape + split_input_shape = [1,] + split_input_shape + broadcast_factor = [] + for dim, old_dim in zip(shape, split_output_shape): + if not isinstance(dim, list): + dim = [dim,] + if not isinstance(old_dim, list): + old_dim = [old_dim,] + if product(tuple(dim)) == product(tuple(old_dim)): + broadcast_factor += [1] * len(old_dim) + elif product(tuple(old_dim)) == 1: + assert len(dim) == 1 + broadcast_factor.append(dim[0]) + else: + raise NotImplementedError(f"Invalid Broadcast: {old_dim} -> {dim}") + + # flatten_split_shape -> split_input_shape + factor_idx = 0 + broadcast_split_input_shape = [] + for dim in split_input_shape: + if isinstance(dim, list): + new_dim = [] + for d in dim: + new_dim.append(d * broadcast_factor[factor_idx]) + factor_idx += 1 + broadcast_split_input_shape.append(new_dim) + else: + broadcast_split_input_shape.append(dim * broadcast_factor[factor_idx]) + factor_idx += 1 + broadcast_split_input_shape = _list_to_tuple(broadcast_split_input_shape) + node_meta.tensor.reshape(_list_to_tuple(split_input_shape)) + node_meta.tensor.broadcast(broadcast_split_input_shape) + # Last reshape op to clean up + broadcast_input_shape = tuple([product(dim) for dim in broadcast_split_input_shape]) + node_meta.tensor.reshape(broadcast_input_shape) + # Update the input shape and output shape + self.input_shape = _list_to_tuple(node_meta.tensor.shape) + self.output_shape = _list_to_tuple(shape) + + def apply_to_user(self, user_meta: NodeBase): + """ + Propagate the reshape to user nodes + """ + user_meta.tensor.reshape(tuple(self.input_shape)) + if hasattr(user_meta, "store_tensor"): + if user_meta.store_tensor is not None: + user_meta.store_tensor.reshape(tuple(self.input_shape)) + + def apply_to_input(self, input_meta: NodeBase): + """ + Propagate the reshape to input nodes + """ + input_meta.tensor.reshape(tuple(self.output_shape)) + if hasattr(input_meta, "store_tensor"): + if input_meta.store_tensor is not None: + input_meta.store_tensor.reshape(tuple(self.output_shape)) + + # + # Helper functions + # + + def infer_split(self, input_shape, output_shape): + """ + Infer the flatten splitted shape that can be merged to both input_shape and output_shape + """ + input_shape = _tuple_to_list(input_shape) + output_shape = _tuple_to_list(output_shape) + if len(input_shape) == 0 and len(output_shape) == 0: + return [] + if len(input_shape) == 0: + if product(tuple(output_shape)) != 1: + raise ValueError("Invalid reshape size") + else: + return output_shape + if len(output_shape) == 0: + if product(tuple(input_shape)) != 1: + raise ValueError("Invalid reshape size") + else: + return input_shape + # This is done recursively by only process the last dimension at each time + old_dim = input_shape[-1] + new_dim = output_shape[-1] + # Exact match + if old_dim == new_dim: + return self.infer_split(input_shape[:-1], output_shape[:-1]) + [new_dim,] + # Needs split + if old_dim > new_dim and old_dim % new_dim == 0: + residual = old_dim // new_dim + return 
self.infer_split(input_shape[:-1] + [residual,], output_shape[:-1]) + [new_dim,] + # Needs merge + if old_dim < new_dim and new_dim % old_dim == 0: + residual = new_dim // old_dim + return self.infer_split(input_shape[:-1], output_shape[:-1] + [residual,]) + [old_dim,] + + raise NotImplementedError(f"Unsupported split: {input_shape} -> {output_shape}") + + def infer_merge(self, flatten_shape, shape): + flatten_shape = _tuple_to_list(flatten_shape) + shape = _tuple_to_list(shape) + idx_flat = len(flatten_shape) - 1 + merged_shape = [] + for dim in reversed(shape): + # Exact match + if dim == flatten_shape[idx_flat]: + merged_shape.append(dim) + idx_flat -= 1 + # need group + elif dim > flatten_shape[idx_flat] and dim % flatten_shape[idx_flat] == 0: + residual = dim + group = [] + while(residual > 1): + group.append(flatten_shape[idx_flat]) + residual = residual // flatten_shape[idx_flat] + idx_flat -= 1 + merged_shape.append(group[::-1]) + else: + raise NotImplementedError(f"Unsupported merge: {flatten_shape} -> {shape}") + + return merged_shape[::-1] + + +class LayoutNode(NodeBase): + """ + Layout manipulation nodes + """ + fn_to_impl = { + "permute": PermutationImpl, + "reshape": ReshapeImpl + } + def __init__(self, name: str, fn, kwargs: dict) -> None: + super().__init__(name) + self.op = "layout" + self.fn = fn + self.kwargs = kwargs + self.underlying_impl = self.fn_to_impl[self.fn.__name__](self) + + def get_inverse_node(self): + inverse_node = deepcopy(self) + inverse_node.underlying_impl = self.underlying_impl.get_inverse_impl() + return inverse_node + + def shape_propagation(self, input_node_metas): + if self._tensor is not None: + return + assert len(input_node_metas) == 1, "Layout node can only have one input node" + + output_shape = self.underlying_impl.shape_propagation(input_node_metas[0]) + + self._tensor = Tensor( + element=self.element_output, + shape=output_shape, layout_tag=LayoutType.RowMajor + ) + + return super().shape_propagation(input_node_metas) + + def type_propagation(self, input_node_metas: 'list[NodeBase]'): + """ + The store nodes has element_output = element_input + """ + assert len(input_node_metas) == 1, "Layout node can only have one input node" + self.element_output = input_node_metas[0].element_output + + def broadcast_propagation(self, input_node_metas: 'list[NodeBase]'): + """ + Propagate the broadcast in the reversed topological order + """ + if self.tensor is None: + raise RuntimeError(f"The tensor of node {self.name} is unknown.") + shape = self.tensor.shape + + for child in input_node_metas: + self.underlying_impl.broadcast(shape, child) + + def apply_to_user(self, usr_meta: NodeBase): + """ + Propagate the permutation to user nodes + """ + self.underlying_impl.apply_to_user(usr_meta) + + def apply_to_input(self, input_meta: NodeBase): + """ + Propagate the permutation to input nodes + """ + self.underlying_impl.apply_to_input(input_meta) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/load_nodes.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/load_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..22abc33f61a042adf773abc8b0556fbc65fbd0ba --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/load_nodes.py @@ -0,0 +1,294 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & 
AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################################# + +""" +Load nodes and implementations +""" + +import ctypes + +from cutlass.backend.c_types import tuple_factory +from cutlass.backend.epilogue import dtype2ctype, to_ctype_value +from cutlass.backend.evt.ir.node import NodeBase, ImplBase + + +class LoadImplBase(ImplBase): + """ + Base class for load node implementations + """ + reserved_names = ["accum", "C"] + def __init__(self, node) -> None: + super().__init__(node) + self.element = node.element + self.element_output = node.element_output + self.stride = node.tensor.stride + + +class AccumulatorImpl(LoadImplBase): + """ + Accumulator node implementation + """ + + @staticmethod + def match(node, problem_size: tuple): + return node.name == "accum" and node.tensor.shape == problem_size + + +class LoadSrcImpl(LoadImplBase): + """ + Load C implementation + """ + @property + def name_camel(self) -> str: + return "TensorC" + + @property + def argument_type_c(self): + stride_mnl = self.get_stride_mnl() + tuple_type = tuple_factory(stride_mnl, self.stride_dtype) + class _Argument(ctypes.Structure): + _fields_ = [ + ("ptr_C", ctypes.c_void_p), + ("stride_C", tuple_type) + ] + def __init__(self, ptr) -> None: + self.ptr_C = ptr + self.stride_C = tuple_type(stride_mnl) + + return _Argument + + @staticmethod + def match(node, problem_size: tuple): + return node.name == "C" and node.tensor.shape == problem_size + + +class AuxLoadImpl(LoadImplBase): + """ + Load arbitrary tensor + """ + @property + def argument_type(self): + stride_mnl = self.get_stride_mnl() + name = self.name + tuple_type = tuple_factory(stride_mnl, self.stride_dtype) + element_type = self.element + class _Argument(ctypes.Structure): + _fields_ = [ + ("ptr_aux", ctypes.c_void_p), + ("null_default", dtype2ctype[element_type]), + ("dAux", tuple_type) + ] + def __init__(self, kwargs) -> None: + ptr = kwargs[name] + self.ptr_aux = ptr + 
self.null_default = to_ctype_value(0, element_type) + self.dAux = tuple_type(stride_mnl) + + return _Argument + + @staticmethod + def match(node, problem_size: tuple): + if node.name in LoadImplBase.reserved_names: + return False + strideMN = node.tensor.stride[-2:] + if (strideMN[0] == 1 and strideMN[1] != 0 or + strideMN[0] != 0 and strideMN[1] == 1 ): + return True + else: + return False + + +class RowBroadcastImpl(LoadImplBase): + """ + Broadcast a row vector + """ + def __init__(self, node) -> None: + super().__init__(node) + self.stride_dtype = "int" + + @property + def argument_type(self): + stride_mnl = self.get_stride_mnl() + name = self.name + tuple_type = tuple_factory(stride_mnl, self.stride_dtype) + element_type = self.element + class _Argument(ctypes.Structure): + _fields_ = [ + ("ptr_row", ctypes.c_void_p), + ("null_default", dtype2ctype[element_type]), + ("dRow", tuple_type) + ] + def __init__(self, kwargs) -> None: + ptr = kwargs[name] + self.ptr_row = ptr + self.null_default = to_ctype_value(0, element_type) + self.dRow = tuple_type(stride_mnl) + + return _Argument + + @staticmethod + def match(node, problem_size: tuple): + if node.name in LoadImplBase.reserved_names: + return False + + strideMN = node.tensor.stride[-2:] + if strideMN == (0, 1): + return True + else: + return False + + +class ColumnBroadcastImpl(LoadImplBase): + """ + Broadcast a column vector + """ + def __init__(self, node) -> None: + super().__init__(node) + self.stride_dtype = "int" + + @property + def argument_type(self): + stride_mnl = self.get_stride_mnl() + name = self.name + tuple_type = tuple_factory(stride_mnl, self.stride_dtype) + element_type = self.element + class _Argument(ctypes.Structure): + _fields_ = [ + ("ptr_col", ctypes.c_void_p), + ("null_default", dtype2ctype[element_type]), + ("dCol", tuple_type) + ] + def __init__(self, kwargs) -> None: + ptr = kwargs[name] + self.ptr_col = int(ptr) + self.null_default = to_ctype_value(0, element_type) + self.dCol = tuple_type(stride_mnl) + + return _Argument + + @staticmethod + def match(node, problem_size: tuple): + if node.name in LoadImplBase.reserved_names: + return False + + strideMN = node.tensor.stride[-2:] + if strideMN == (1, 0): + return True + else: + return False + + +class ScalarBroadcastImpl(LoadImplBase): + """ + Broadcast a scalar + """ + def __init__(self, node) -> None: + super().__init__(node) + self.stride_dtype = "int" + + @property + def argument_type(self): + stride_mnl = self.get_stride_mnl() + name = self.name + tuple_type = tuple_factory(stride_mnl, self.stride_dtype) + element_type = self.element + + if self.tensor.is_constant: + value = self.tensor.value + class _Argument(ctypes.Structure): + _fields_ = [ + ("scalars", dtype2ctype[element_type]), + ("scalar_ptrs", ctypes.c_void_p), + ("dScalar", tuple_type) + ] + def __init__(self, kwargs) -> None: + self.scalars = to_ctype_value(value, element_type) + self.scalar_ptrs = 0 + self.dScalar = tuple_type(stride_mnl) + + else: + class _Argument(ctypes.Structure): + _fields_ = [ + ("scalars", dtype2ctype[element_type]), + ("scalar_ptrs", ctypes.c_void_p), + ("dScalar", tuple_type) + ] + def __init__(self, kwargs) -> None: + scalar_or_ptr = kwargs[name] + if isinstance(scalar_or_ptr, float): + self.scalars = to_ctype_value(scalar_or_ptr, element_type) + self.scalar_ptrs = 0 + else: + self.scalar_ptrs = int(scalar_or_ptr) + + self.dScalar = tuple_type(stride_mnl) + + return _Argument + + @staticmethod + def match(node, problem_size: tuple): + if node.name in 
LoadImplBase.reserved_names: + return False + + strideMN = node.tensor.stride[-2:] + if strideMN == (0, 0): + return True + else: + return False + + +class LoadNode(NodeBase): + """ + Load Node + """ + cnt = 0 + possible_impls = [ + AccumulatorImpl, LoadSrcImpl, AuxLoadImpl, + RowBroadcastImpl, ColumnBroadcastImpl, + ScalarBroadcastImpl + ] + def __init__(self, name: str) -> None: + if name is None: + name = f"load{LoadNode.cnt}" + LoadNode.cnt += 1 + super().__init__(name) + self.op = "load" + + def type_propagation(self, *args, **kwargs): + """ + Load node loads tensor under type `tensor.element` and returns an array of type `tensor.element`. + """ + if self.tensor is None: + raise RuntimeError(f"The tensor of node {self.name} is unknown.") + + self.element = self.tensor.element + self.element_output = self.tensor.element diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/node.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/node.py new file mode 100644 index 0000000000000000000000000000000000000000..9cf23331f3187f39d8a6f2e33ff5523bc815d69a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/node.py @@ -0,0 +1,292 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
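Each load implementation above is selected purely from the last two (M, N) stride entries of the node's tensor: (0, 1) picks RowBroadcastImpl, (1, 0) ColumnBroadcastImpl, (0, 0) ScalarBroadcastImpl, and a contiguous MN stride picks AuxLoadImpl. A rough sketch of that dispatch, assuming these EVT modules import outside the full compiler pipeline; the node name "bias", the shapes, and the problem size are illustrative only:

    from cutlass import DataType, LayoutType
    from cutlass.backend.evt.ir.load_nodes import LoadNode, RowBroadcastImpl

    # A (1, 1, N) row-major tensor broadcast along M: canonicalized strides end in (0, 1)
    bias = LoadNode("bias")
    bias.tensor = {"element": DataType.f32, "shape": (1, 1, 16), "layout_tag": LayoutType.RowMajor}
    bias.type_propagation()

    problem_size = (1, 8, 16)            # illustrative (L, M, N)
    bias.get_underlying_impl(problem_size)
    assert isinstance(bias.underlying_impl, RowBroadcastImpl)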
+# +################################################################################################# + +""" +Base & visitor classes of DAGIR Nodes +""" + +import ctypes +from re import sub + +from cutlass import LayoutType +from cutlass.backend.evt.ir.layout_algorithm import _list_to_tuple, _reverse_tuple +from cutlass.backend.evt.ir.tensor import Tensor + + +class ImplBase: + """ + Base class for Node Implementation + """ + def __init__(self, node) -> None: + self.node = node + self.name = node.name + self.tensor = node.tensor + self._type_decl = None + self.stride_dtype = "int64_t" + + @staticmethod + def match(node, problem_size: tuple): + """ + Match function used in get_underlying_impl + """ + raise NotImplementedError(f"The `match` function is not defined.") + + @property + def argument_type(self): + """ + Default class for Argument Type + """ + class _Argument(ctypes.Structure): + _fields_ = [] + + def __init__(self, *args, **kwargs) -> None: + pass + + return _Argument + + @property + def name_camel(self) -> str: + """ + Return the CamelCase name. + """ + return sub(r"(_|-)+", " ", self.name).title().replace(" ", "") + + def _emit_cute_tuple(self, py_tuple): + """ + Emit the cute tuple to C++ code + """ + if isinstance(py_tuple, int): + if py_tuple in [0, 1]: + return f"cute::Int<{py_tuple}>" + else: + return f"{self.stride_dtype}" + elif isinstance(py_tuple, tuple): + decl = "cute::Stride<" + for item in py_tuple: + decl += self._emit_cute_tuple(item) + ", " + return decl[:-2] + ">" + else: + raise ValueError(f"_emit_cute_tuple only accepts tuple or int, got {type(py_tuple).__name__}") + + @property + def stride_mnl(self): + """ + Typename StrideMNL + """ + stride = _list_to_tuple([self.stride[-2], self.stride[-1]] + list(_reverse_tuple(tuple(self.stride[:-2])))) + return self._emit_cute_tuple(stride) + + def get_non_constant_stride(self, py_tuple): + if isinstance(py_tuple, int): + if py_tuple not in [0, 1]: + return py_tuple + else: + return None + non_constant_stride = [] + for item in py_tuple: + item_out = self.get_non_constant_stride(item) + if item_out: + non_constant_stride.append(item_out) + return tuple(non_constant_stride) + + def get_stride_mnl(self): + """ + Get the non-zero stride mnl. This is used in argument construction + """ + stride = _list_to_tuple([self.stride[-2], self.stride[-1]] + list(_reverse_tuple(tuple(self.stride[:-2])))) + return stride + + def get_smem_size(self, *args, **kwargs): + """ + Get the shared memory size and alignment of current node + """ + return (0, 1) + + +class NoOpImpl(ImplBase): + """ + The NoOpImpl does nothing but forward its input to users + """ + def __init__(self, node) -> None: + super().__init__(node) + + @staticmethod + def match(node, problem_size: tuple): + if node.op == "store": + # Store that is not output is a No OP + return not node.is_output + + +class NodeBase: + """ + Base class of DAG Node + """ + def __init__(self, name: str) -> None: + self.name = name + self.underlying_impl = None + + self._tensor = None + + # Whether the node is disabled for emit + self.disabled = False + + @property + def name_camel(self) -> str: + """ + Return the CamelCase name. 
+ """ + return self.underlying_impl.name_camel + + @property + def tensor(self) -> Tensor: + """ + Return the output tensor (concept: cutlass.backend.evt.ir.tensor) + """ + return self._tensor + + @tensor.setter + def tensor(self, kwargs): + """ + Setting the tensor + """ + self._tensor = Tensor(**kwargs) + + # + # Helper functions for type/shape propagation + # + + def shape_propagation(self, input_node_metas): + """ + Infer shape from input nodes + General Broadcasting Rules from NumPy + When operating on two arrays, we compare their shapes element-wise. + It starts with the trailing (i.e. rightmost) dimension and works its + way left. Two dimensions are compatible when + 1. they are equal + 2. one of them is 1 + """ + if self._tensor is not None: + return + + shape = None + for src in input_node_metas: + src_shape = src.tensor.shape + if shape is None: + shape = src_shape + else: + len_difference = len(shape) - len(src_shape) + if len_difference > 0: + for _ in range(len_difference): + src_shape = [1, ] + list(src_shape) + elif len_difference < 0: + for _ in range(-len_difference): + shape = [1, ] + list(shape) + broadcasted_shape = [] + # Infer broadcast shape + for shape_dim, src_dim in zip(reversed(shape), reversed(src_shape)): + if shape_dim == 1: + broadcasted_shape = [src_dim, ] + list(broadcasted_shape) + elif src_dim == 1: + broadcasted_shape = [shape_dim, ] + list(broadcasted_shape) + elif shape_dim == src_dim: + broadcasted_shape = [shape_dim, ] + list(broadcasted_shape) + else: + error_msg = "Dimension mismatch between " + for src_ in input_node_metas: + error_msg += f"{src_.name}{src_.tensor.shape}, " + error_msg = error_msg[:-2] + "." + raise RuntimeError(error_msg) + shape = tuple(broadcasted_shape) + + self._tensor = Tensor(element=self.element_output, shape=shape, layout_tag=LayoutType.RowMajor) + + def type_propagation(self, *args, **kwargs): + """ + Each node is associated with two data types: `element` and `element_output`. + The `element_output` is the type of return array of the node. The `element` + has specific meaning for different node types. + * Load Node: data type of tensor in gmem + * Compute Node: element compute + * Store Node: data type of tensor in gmem + This function must be overloaded in the derived classes + """ + raise NotImplementedError(f"Function `type_propagation` is not overloaded in {self.__class__.__name__}") + + def broadcast_propagation(self, input_node_metas: 'list[NodeBase]'): + """ + Propagate the broadcast in the reversed topological order. + For example: + C[l, m, n] = A[m, 1] + B[l, m, n] + After the broadcast propagation, it will be come + C[l, m, n] = A[l, m, n] + B[l, m, n] + and each tensor will have a proper stride accessing the underlying tensor + """ + if self.tensor is None: + raise RuntimeError(f"The tensor of node {self.name} is unknown.") + for child in input_node_metas: + child.tensor.broadcast(self.tensor.shape) + + def get_underlying_impl(self, problem_size: tuple): + """ + Get the underlying implementation of the current node. + """ + if self.tensor is None: + raise RuntimeError(f"The Layout of node {self.name} is unknown. 
Please call PassShapeTypePropagation first.") + + for impl in self.possible_impls: + if impl.match(self, problem_size): + self.underlying_impl = impl(self) + break + + if self.underlying_impl is None: + raise NotImplementedError(f"No matching op for node {self.name} with stride {self.tensor.stride}.") + +# +# Visitor Nodes & Impls +# + +class TopoVisitorImpl(ImplBase): + """ + Impl for topological visitor + """ + def __init__(self, node) -> None: + super().__init__(node.output_node) + self.name = node.name + self.element_output = node.output_node.element_output + +class TopoVisitorNode(NodeBase): + def __init__(self, name: str, subgraph, output_node) -> None: + super().__init__(name) + self.subgraph = subgraph + self.output_node = output_node + self.op = "dag" + self.underlying_impl = TopoVisitorImpl(self) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/store_nodes.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/store_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..e050e43009b7113186247772d020c9ecc4a63a9c --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/store_nodes.py @@ -0,0 +1,276 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
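The shape_propagation method above follows NumPy's general broadcasting rules: shapes are right-aligned, missing leading dimensions are treated as 1, and each aligned pair of dimensions must either match or contain a 1. A standalone sketch of the same rule, independent of the node classes and written here only for illustration:

    def broadcast_shape(a, b):
        """Right-align two shapes and broadcast them NumPy-style."""
        a, b = list(a), list(b)
        # Pad the shorter shape with leading 1s
        while len(a) < len(b):
            a.insert(0, 1)
        while len(b) < len(a):
            b.insert(0, 1)
        out = []
        for x, y in zip(a, b):
            if x == y or y == 1:
                out.append(x)
            elif x == 1:
                out.append(y)
            else:
                raise RuntimeError(f"Dimension mismatch: {tuple(a)} vs {tuple(b)}")
        return tuple(out)

    assert broadcast_shape((3, 1), (4, 3, 5)) == (4, 3, 5)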
+# +################################################################################################# + +""" +Store node and implementations +""" + +import ctypes + +from cutlass import DataType +from cutlass.backend.c_types import tuple_factory +from cutlass.backend.epilogue import dtype2ctype, to_ctype_value +from cutlass.backend.evt.ir.node import NodeBase, ImplBase, NoOpImpl +from cutlass.backend.evt.ir.tensor import Tensor +from cutlass.backend.library import FloatRoundStyle, FunctionalOp + + +class StoreImplBase(ImplBase): + """ + Base class for store node implementation + """ + reserved_names = ["D"] + def __init__(self, node) -> None: + super().__init__(node) + self.element = node.element + self.element_output = node.element_output + self.stride = node.store_tensor.stride + + +class StoreDImpl(StoreImplBase): + """ + Store D implementation + """ + + @property + def argument_type_d(self): + stride_mnl = self.get_stride_mnl() + tuple_type = tuple_factory(stride_mnl, self.stride_dtype) + class _Argument(ctypes.Structure): + _fields_ = [ + ("ptr_D", ctypes.c_void_p), + ("stride_D", tuple_type) + ] + def __init__(self, ptr: int) -> None: + self.ptr_D = ptr + self.stride_D = tuple_type(stride_mnl) + + return _Argument + + @staticmethod + def match(node, problem_size: tuple): + if node.name == "D" and node.store_tensor.shape == problem_size: + return True + return False + + +class AuxStoreImpl(StoreImplBase): + def __init__(self, node) -> None: + super().__init__(node) + self.round_style = FloatRoundStyle.ToNearest + + @property + def argument_type(self): + stride_mnl = self.get_stride_mnl() + name = self.name + tuple_type = tuple_factory(stride_mnl, self.stride_dtype) + class _Argument(ctypes.Structure): + _fields_ = [ + ("ptr_aux", ctypes.c_void_p), + ("dAux", tuple_type) + ] + def __init__(self, kwargs) -> None: + ptr = kwargs[name] + self.ptr_aux = ptr + self.dAux = tuple_type(stride_mnl) + + return _Argument + + @staticmethod + def match(node, problem_size: tuple): + if not node.is_output: + return False + if node.name in StoreImplBase.reserved_names: + return False + + strideMN = node.store_tensor.stride[-2:] + if (strideMN[0] == 1 and strideMN[1] != 0 or + strideMN[0] != 0 and strideMN[1] == 1 ): + return True + else: + return False + + +class ReductionImplBase(StoreImplBase): + def __init__(self, node) -> None: + super().__init__(node) + self.element = node.store_tensor.element + self.element_compute = node.element_compute + self.reg_reduce_fn = self.node.reg_reduce_fn + self.gmem_reduce_fn = self.node.gmem_reduce_fn + self.round_style = node.round_style + self.stride_dtype = "int" + + def get_reduce_identity(self): + """ + Return the reduction identity of the current reduce_fn + """ + maxes = { + DataType.f32: (2 ** 31) - 1, + DataType.f16: (2 ** 15), + DataType.s32: (2 ** 31) - 1, + DataType.s8: (2 ** 7) - 1 + } + mins = { + DataType.f32: -maxes[DataType.f32], + DataType.f16: -maxes[DataType.f16], + DataType.s32: -maxes[DataType.s32], + DataType.s8: -maxes[DataType.s8] + } + if self.reg_reduce_fn == FunctionalOp.Maximum: + if self.element_compute not in mins: + raise Exception(f"No min entry for data type {self.element_compute}") + return to_ctype_value(mins[self.element_compute], self.element_compute) + elif self.reg_reduce_fn == FunctionalOp.Multiplies: + return to_ctype_value(1., self.element_compute) + elif self.reg_reduce_fn == FunctionalOp.Minimum: + if self.element_compute not in maxes: + raise Exception(f"No max entry for data type {self.element_compute}") + return 
to_ctype_value(maxes[self.element_compute], self.element_compute) + else: + return to_ctype_value(0., self.element_compute) + + @property + def argument_type(self): + self.get_reduce_identity() + stride_mnl = self.get_stride_mnl() + name = self.name + tuple_type = tuple_factory(stride_mnl, self.stride_dtype) + element_compute = self.element_compute + reduce_identity = self.get_reduce_identity() + class _Argument(ctypes.Structure): + _fields_ = [ + ("ptr", ctypes.c_void_p), + ("reduce_identity", dtype2ctype[element_compute]), + ("dMNL", tuple_type) + ] + def __init__(self, kwargs) -> None: + ptr = kwargs[name] + self.ptr = ptr + self.reduce_identity = reduce_identity + self.dMNL = tuple_type(stride_mnl) + + return _Argument + + +class ColumnReductionImpl(ReductionImplBase): + + @staticmethod + def match(node, problem_size: tuple): + if not node.is_output: + return False + if node.name in StoreImplBase.reserved_names: + return False + + strideMN = node.store_tensor.stride[-2:] + if strideMN == (1, 0): + return True + else: + return False + + +class RowReductionImpl(ReductionImplBase): + + @staticmethod + def match(node, problem_size: tuple): + if not node.is_output: + return False + if node.name in StoreImplBase.reserved_names: + return False + + strideMN = node.store_tensor.stride[-2:] + if strideMN == (0, 1): + return True + else: + return False + + +class ScalarReductionImpl(ReductionImplBase): + + @staticmethod + def match(node, problem_size: tuple): + if not node.is_output: + return False + if node.name in StoreImplBase.reserved_names: + return False + + strideMN = node.store_tensor.stride[-2:] + if strideMN == (0, 0): + return True + else: + return False + + +class StoreNode(NodeBase): + """ + Store node + """ + possible_impls = [ + AuxStoreImpl, RowReductionImpl, + ColumnReductionImpl, ScalarReductionImpl, + NoOpImpl, StoreDImpl + ] + def __init__(self, name: str) -> None: + super().__init__(name) + self.op = "store" + self.is_output = False + self._store_tensor = None + + @property + def store_tensor(self) -> Tensor: + """ + Return the output tensor (concept: cutlass.backend.evt.ir.tensor) + """ + return self._store_tensor + + @store_tensor.setter + def store_tensor(self, kwargs): + """ + Setting the tensor + """ + self._store_tensor = Tensor(**kwargs) + + def type_propagation(self, input_node_metas: 'list[NodeBase]'): + """ + The store nodes has element_output = element_input + """ + if self.is_output: + if self.store_tensor is None: + raise RuntimeError(f"The store tensor of node {self.name} is unknown.") + self.element = self.store_tensor.element + assert len(input_node_metas) == 1, "Store node can only have one input node" + self.element_output = input_node_metas[0].element_output + + def broadcast_propagation(self, input_node_metas: 'list[NodeBase]'): + super().broadcast_propagation(input_node_metas) + if self.is_output: + self._store_tensor.broadcast(self.tensor.shape) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/tensor.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/tensor.py new file mode 100644 index 0000000000000000000000000000000000000000..aa0c008e89c1db469b3ff510dbdb659cb35f9c27 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/ir/tensor.py @@ -0,0 +1,130 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & 
AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################################# + +""" +High-level class for tensor +""" + +from cutlass import LayoutType + +from cutlass.backend.evt.ir.layout_algorithm import ( + Layout, + broadcast, + canonicalization, + permutation, + reshape, + _reverse_tuple +) +from cutlass.utils.datatypes import get_datatype_and_layout, get_tensor_shape, library_type + + +class Tensor: + """ + The tensor abstracts the data type + """ + def __init__(self, tensor=None, element=None, shape=None, layout_tag=None, is_constant=False) -> None: + if element is not None and tensor is not None: + raise Exception(f"Must not specify both element and tensor") + elif shape is not None and tensor is not None: + raise Exception(f"Must not specify both shape and tensor") + elif layout_tag is not None and tensor is not None: + raise Exception(f"Must not specify both layout_tag and tensor") + elif (element is None or layout_tag is None or shape is None) and (tensor is None) : + raise Exception(f"Must specify one of (element, shape, layout) or (tensor)") + + if isinstance(tensor, Tensor): + # Directly copy all the attributes + self.__dict__.update(vars(tensor)) + else: + if tensor is None: + self.element = library_type(element) + else: + self.element, layout_tag = get_datatype_and_layout(tensor) + shape = get_tensor_shape(tensor) + if layout_tag == LayoutType.RowMajor: + self.layout = Layout(shape[::-1]) + elif layout_tag == LayoutType.ColumnMajor: + self.layout = permutation(Layout(shape), [idx for idx in reversed(range(len(shape)))]) + self.layout = canonicalization(self.layout) + + self.is_constant = is_constant + # Save the tensor value if it is constant + if is_constant and tensor is not None: + self.value = tensor + + @property + def shape(self): + """ + Returns the RowMajor layout shape + """ + return _reverse_tuple(self.layout.shape) + + @property + def stride(self): + """ + Returns the RowMajor layout stride + 
""" + return _reverse_tuple(self.layout.stride) + + @property + def rank(self): + """ + Returns the rank of the tensor + """ + return len(self.shape) + + # + # Layout Algorithms + # + + def broadcast(self, shape): + """ + Broadcast self.layout to shape + """ + assert isinstance(shape, tuple) + self.layout = broadcast(self.layout, _reverse_tuple(shape)) + + def reshape(self, shape): + """ + Reshape self.layout to shape + """ + assert isinstance(shape, tuple) + reverse_shape = _reverse_tuple(shape) + self.layout = reshape(self.layout, reverse_shape) + + def permute(self, indices): + """ + Permute self.layout according to indices + """ + length = len(indices) + indices = [length - idx - 1 for idx in indices] + self.layout = permutation(self.layout, indices[::-1]) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/__init__.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..265e2180bada10fc449521860fd468506ad49f77 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/__init__.py @@ -0,0 +1,42 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +################################################################################################# + +from cutlass.backend.evt.passes.graph_drawer import EVTGraphDrawer +from cutlass.backend.evt.passes.pass_argument_type import PassGetArgumentType +from cutlass.backend.evt.passes.pass_dag_2_tree import PassDAG2Tree +from cutlass.backend.evt.passes.pass_get_impl import PassGetImpl +from cutlass.backend.evt.passes.pass_fix_element_d import PassFixElementD +from cutlass.backend.evt.passes.pass_layout_elimination import PassLayoutManipulateElimination +from cutlass.backend.evt.passes.pass_manager import EVTPassManager +from cutlass.backend.evt.passes.pass_preprocess_red import PassPreprocessRed +from cutlass.backend.evt.passes.pass_shape_type_propagation import PassShapeTypePropagation +from cutlass.backend.evt.passes.smem_size_calculator import GetSmemSize diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5578d4bd37fdc8b7a0526859cf279882fce815d0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/__pycache__/graph_drawer.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/__pycache__/graph_drawer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8013518400cebeec117b4daa75d222cf71d4a165 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/__pycache__/graph_drawer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/__pycache__/pass_argument_type.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/__pycache__/pass_argument_type.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f44540a4903a06bfdefb2c45112da37fcced6c1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/__pycache__/pass_argument_type.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/__pycache__/pass_dag_2_tree.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/__pycache__/pass_dag_2_tree.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..486470ee22cc1b37a9efdfd640d3da3b1128c8dc Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/__pycache__/pass_dag_2_tree.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/__pycache__/pass_fix_element_d.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/__pycache__/pass_fix_element_d.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca422bdb8427b118d194b761bef0e7da0fb71e64 Binary files 
/dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/__pycache__/pass_fix_element_d.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/__pycache__/pass_get_impl.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/__pycache__/pass_get_impl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b74a7954a9594325fcd2d40f6bae9ae6e5c8b671 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/__pycache__/pass_get_impl.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/__pycache__/pass_layout_elimination.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/__pycache__/pass_layout_elimination.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e6cec6e9270ec4a687742305a31a41fe0f8617fa Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/__pycache__/pass_layout_elimination.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/__pycache__/pass_manager.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/__pycache__/pass_manager.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..93147e3c6cae690fb438d690b8febc602661dd16 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/__pycache__/pass_manager.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/__pycache__/pass_no_op_elimination.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/__pycache__/pass_no_op_elimination.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..810332d586c25451179dd4130e66fe5672b8ec8b Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/__pycache__/pass_no_op_elimination.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/__pycache__/pass_preprocess_red.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/__pycache__/pass_preprocess_red.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..59104d33629a0f60667586d8b4bae22fa1037ffc Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/__pycache__/pass_preprocess_red.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/__pycache__/pass_shape_type_propagation.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/__pycache__/pass_shape_type_propagation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6fad754c2bcd4a146eb11ead3daeedb0211fca8d Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/__pycache__/pass_shape_type_propagation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/__pycache__/smem_size_calculator.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/__pycache__/smem_size_calculator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d27b8ddff37f9218c10c62aac5a895d38f70387d Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/__pycache__/smem_size_calculator.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/graph_drawer.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/graph_drawer.py new file mode 100644 index 0000000000000000000000000000000000000000..83406f96ee88d8bd10287eb0f42552389793ca07 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/graph_drawer.py @@ -0,0 +1,158 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +################################################################################################# + +import subprocess + +import pydot + +from cutlass import DataTypeTag +from cutlass.backend.evt.ir.dag_ir import DAGIR + + +_COLOR_MAP = { + "load": '"AliceBlue"', + "compute": "LemonChiffon1", + "accumulator": "LightGrey", + "store": "PowderBlue", + "layout": "lightseagreen", + "dag": "darkorange" +} + + +class EVTGraphDrawer: + """ + Visualize a EVT DAGIR with graphviz + """ + def __init__( + self, + graph: DAGIR, + name: str + ): + self._name = name + self._dot_graphs = {} + + self._dot_graphs[name] = self._to_dot(graph, name) + self.dot_available = self._check_dot_availability() + + def _check_dot_availability(self): + """ + Check if graphviz is installed + """ + try: + # Run the 'dot' command and capture its output + result = subprocess.run( + ["dot", "-V"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) + # Check if the command was successful and the output contains version information + if result.returncode == 0 and "dot - graphviz" in result.stderr: + return True + except FileNotFoundError: + pass + return False + + def _get_node_style(self, node): + template = { + "shape": "record", + "fillcolor": "#CAFFE3", + "style": '"filled,rounded"', + "fontcolor": "#000000", + } + if node.op in _COLOR_MAP: + template["fillcolor"] = _COLOR_MAP[node.op] + else: + raise NotImplementedError("unknown node op") + if node.disabled: + template["fontcolor"] = "grey" + template["fillcolor"] = "white" + return template + + def _get_node_label(self, node): + label = "{" + f"name={node.name}|op={node.op}" + if node.op == "layout": + label += f"|fn={node.fn.__name__}" + for key in node.kwargs: + label += f"|{key}={node.kwargs[key]}" + if node.underlying_impl is not None: + label += f"|impl={type(node.underlying_impl).__name__}" + if node.op == "load": + label += f"|element_output={DataTypeTag[node.underlying_impl.element]}" + elif node.op == "compute": + label += f"|element_compute={DataTypeTag[node.underlying_impl.element_compute]}|element_output={DataTypeTag[node.underlying_impl.element_output]}" + elif node.op == "store": + label += f"|element_store={DataTypeTag[node.underlying_impl.element]}|element_output={DataTypeTag[node.underlying_impl.element_output]}" + elif node.op == "dag": + label += f"|element_output={DataTypeTag[node.underlying_impl.element_output]}" + if node.tensor is not None: + shape = node.tensor.shape + stride = node.tensor.stride + label += f"|shape={shape}|stride={stride}" + + if hasattr(node, "store_tensor"): + if node.store_tensor is not None: + store_shape = node.store_tensor.shape + store_stride = node.store_tensor.stride + label += f"|store_shape={store_shape}|stride_stride={store_stride}" + + label += "}" + return label + + def _to_dot( + self, + graph: DAGIR, + name: str + ): + dot_graph = pydot.Dot(name, randir="TB") + for node in graph.nodes_meta: + style = self._get_node_style(node) + label = self._get_node_label(node) + dot_node = pydot.Node( + node.name, label=label, **style + ) + dot_graph.add_node(dot_node) + if node.op == "dag": + dot_subgraph = self._to_dot(node.subgraph, name=node.name) + self._dot_graphs[node.name] = dot_subgraph + + # Add edges + for src, dst in graph.edges: + weight = graph.get_edge_weight(src, dst) + dot_graph.add_edge(pydot.Edge(src, dst, label=weight)) + + return dot_graph + + def get_dot_graph(self) -> pydot.Dot: + return [(key, self.get_dot_graph_by_name(key)) for key in self._dot_graphs.keys()] + + def 
get_dot_graph_by_name(self, name) -> pydot.Dot: + return self._dot_graphs[name] + + def get_main_dot_graph(self) -> pydot.Dot: + return self._dot_graphs[self._name] diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/pass_argument_type.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/pass_argument_type.py new file mode 100644 index 0000000000000000000000000000000000000000..1e09a612f84be738002f37fa4b054ff2e482175a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/pass_argument_type.py @@ -0,0 +1,116 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +################################################################################################# + +""" +Construct the epilogue visitor argument type +""" + +from cutlass.backend.c_types import visitor_factory +from cutlass.backend.evt.ir import TopoVisitorNode +from cutlass.backend.evt.passes.pass_dag_2_tree import PassDAG2Tree +from cutlass.backend.evt.passes.pass_get_impl import PassGetImpl +from cutlass.backend.evt.passes.pass_manager import EVTPassBase +from cutlass.backend.evt.passes.pass_shape_type_propagation import PassShapeTypePropagation + + +class PassGetArgumentType(EVTPassBase): + """ + Construct the epilogue visitor argument type + """ + dependencies = [ + PassShapeTypePropagation, # The Layout of all nodes must be set + PassDAG2Tree, # The type of each node must be set + PassGetImpl # The DAG subgraphs must be set + ] + + def requires(self) -> None: + # Check "D" is in the node list + if self.cc == 90 and (not self.dag_ir.has_node("D")): + raise SyntaxError( + "Sm90 EVT requires the epilogue to have a returned tensor D, " + "but the variable 'D' is not found in the return values.") + + def call(self): + nodes = self.dag_ir.nodes_topological_order() + self.argument_types = {} + for node in nodes: + meta = self.dag_ir.get_node_meta(node) + if not meta.disabled: + self.argument_types[node] = meta.underlying_impl.argument_type + if node == "D" and self.cc == 90: + continue + if isinstance(meta, TopoVisitorNode): + self.get_dag_argument_type(node) + else: + self.get_evt_argument_type(node) + + self.cc_specific_method(self.set_argument_type)() + + def get_evt_argument_type(self, node): + # Sort the input nodes by edge weight + input_types = [self.argument_types[child] for child in self.dag_ir.get_all_inputs(node)] + if len(input_types) > 0: + self.argument_types[node] = visitor_factory( + input_types + [self.argument_types[node],], self.dag_ir.get_all_inputs(node) + [node,]) + + def get_dag_argument_type(self, node): + meta = self.dag_ir.get_node_meta(node) + subgraph = meta.subgraph + subgraph_nodes = subgraph.nodes_topological_order() + # Visit the unvisited nodes in subgraph + for n in subgraph_nodes: + m = subgraph.get_node_meta(n) + if m.disabled: + continue + else: + self.argument_types[n] = m.underlying_impl.argument_type + input_types = [self.argument_types[child] for child in subgraph_nodes[:-1]] + if len(input_types) > 0: + self.argument_types[node] = visitor_factory(input_types, subgraph_nodes[:-1]) + + def set_argument_type(self): + pass + + def sm90_set_argument_type(self): + self.dag_ir.epilogue_thread_type = self.argument_types[self.dag_ir.get_all_inputs("D")[0]] + # Get the tensorD argument type + self.dag_ir.arg_d_type = self.dag_ir.get_node_meta("D").underlying_impl.argument_type_d + + # Get the tensorC argument type + if self.dag_ir.has_node("C"): + self.dag_ir.arg_c_type = self.dag_ir.get_node_meta("C").underlying_impl.argument_type_c + else: + self.dag_ir.arg_c_type = self.dag_ir.arg_d_type + + def sm80_set_argument_type(self): + nodes = self.dag_ir.nodes_topological_order() + self.dag_ir.epilogue_thread_type = self.argument_types[nodes[-1]] diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/pass_dag_2_tree.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/pass_dag_2_tree.py new file mode 100644 index 0000000000000000000000000000000000000000..b8337924259a9c6f2c5b0ddf895a25f042f1fac4 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/pass_dag_2_tree.py @@ -0,0 +1,147 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################################# + +""" +Merge non-tree sub-graphs of the DAG IR into a single DAG. The fused DAG will be implemented +by the topological visitor, while the rest of the graph will be implemented with the tree visitor. 
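+
+A minimal usage sketch (node names are hypothetical, and ``dag_ir`` is assumed to be
+an existing DAGIR whose shapes, types, and node impls were already resolved by the
+dependency passes):
+
+.. code-block:: python
+
+    # Hypothetical diamond: "accum" feeds both "mult_0" and "mult_1", which are
+    # summed by "add_0". Because "accum" has two users, the region
+    # {mult_0, mult_1, add_0} cannot be expressed as a tree; the pass fuses it
+    # into a single TopoVisitorNode named "dag_add_0", leaving a tree behind.
+    PassDAG2Tree(dag_ir)()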
+""" + +from copy import deepcopy + +from cutlass.backend.evt.ir import DAGIR, TopoVisitorNode +from cutlass.backend.evt.passes.pass_get_impl import PassGetImpl +from cutlass.backend.evt.passes.pass_manager import EVTPassBase +from cutlass.backend.evt.passes.pass_shape_type_propagation import PassShapeTypePropagation + + +class PassDAG2Tree(EVTPassBase): + """ + Convert the DAG IR to Tree by fusing subgraphs + """ + dependencies = [ + PassShapeTypePropagation, + PassGetImpl + ] + + def call(self): + # Step 1: find the nodes that have multiple parents + multi_parent_nodes = [] + + for node in self.dag_ir.nodes_topological_order(): + if self.dag_ir.out_degree(node) > 1: + multi_parent_nodes.append(node) + # Step 2: find the lowest common ancestor (LCA) of all its parents + for node in multi_parent_nodes: + # A multi-parent node could be already fused by the previous node + if not self.dag_ir.has_node(node): + continue + # A node uncovered by the previous fusions can have out degree change + # Case 1: it has <= 1 edges to the previously fused subgraph, no degree change + # Case 2: it has more than one edges to the previously fused subgraph, degree drops + if self.dag_ir.out_degree(node) <= 1: + continue + + # Otherwise, the node still + reachable_nodes = [] + # Complexity: O(Dout*N) + for parent in self.dag_ir.get_users(node): + reachable_nodes.append(set(self.dag_ir.all_reachable_nodes(parent))) + # get the common reachable objects + common_items = set.intersection(*reachable_nodes) + + # If common ancestor exists, find the lowest one + if len(common_items) > 0: + topo_order = self.dag_ir.nodes_topological_order() + lca = None + topo_idx = -1 + for item in common_items: + if lca is None: + lca = item + topo_idx = topo_order.index(item) + else: + if topo_idx > topo_order.index(item): + lca = item + topo_idx = topo_order.index(item) + # The lca is the output node of the DAG node + # Get the nodes to be fused + node_to_fuse = set.union(*reachable_nodes).difference(common_items) + node_to_fuse.add(lca) + # Get all the input nodes + all_input_nodes = [] + all_output_nodes = [] + for node in node_to_fuse: + all_input_nodes.append(set(self.dag_ir.get_all_inputs(node))) + all_output_nodes.append(set(self.dag_ir.get_users(node))) + all_input_nodes = set.union(*all_input_nodes) + all_output_nodes = set.union(*all_output_nodes) + + new_subgraph_nodes = set.union(node_to_fuse, all_input_nodes, all_output_nodes) + + # Create the subgraph + subgraph_ = self.dag_ir._graph.subgraph(new_subgraph_nodes) + subgraph = DAGIR() + for node in subgraph_.nodes: + meta = deepcopy(self.dag_ir.get_node_meta(node)) + if node not in node_to_fuse: + meta.disabled = True + subgraph.add_node(meta) + for edge in subgraph_.edges: + subgraph.add_edge(edge[0], edge[1], self.dag_ir.get_edge_weight(edge[0], edge[1])) + + + # Create the fused node + dag_node = TopoVisitorNode( + name=f"dag_{lca}", subgraph=subgraph, + output_node=self.dag_ir.get_node_meta(lca)) + self.dag_ir.add_node(dag_node) + + # Add input edges + for idx, node in enumerate(all_input_nodes): + self.dag_ir.add_edge(node, dag_node.name, weight=idx) + + # Replace all uses with DAG node (only 1 output node) + self.dag_ir.replace_all_uses_with(lca, dag_node.name) + + # Remove all fused nodes + node_to_fuse.remove(lca) + for node in node_to_fuse: + self.dag_ir.remove_node(node) + + else: + raise NotImplementedError("No LCA found. 
Consider SplitTreeVisitor.") + + def ensures(self) -> None: + # Ensure that after the pass, the resulting DAG becomes a tree + for node in self.dag_ir.nodes: + out_degree = self.dag_ir.out_degree(node) + if out_degree > 1: + raise RuntimeError(f"PassDAG2Tree failed. Node {node} still have outdegree = {out_degree}") diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/pass_fix_element_d.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/pass_fix_element_d.py new file mode 100644 index 0000000000000000000000000000000000000000..1e8e8604a4bc2ffe98d09d4173e7c513f3a4d759 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/pass_fix_element_d.py @@ -0,0 +1,64 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################################# + +""" +Fix the element_output of producer of D. + +In Sm90 epilogue visitor, the node writing D to gmem does not have internal +element converter, so the compute node producing D must have element_output = type(D). 
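+
+For illustration (dtypes hypothetical): if D is declared as f16 while the compute
+node feeding the "D" store was inferred to output f32, this pass walks back from
+"D" (through any intermediate store nodes) and resets that compute node's
+element_output to f16, so the value is already in the store type at the gmem write.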
+""" + +from cutlass.backend.evt.passes.pass_layout_elimination import PassLayoutManipulateElimination +from cutlass.backend.evt.passes.pass_manager import EVTPassBase + + +class PassFixElementD(EVTPassBase): + """ + In Sm90 epilogue visitor, the node writing D to gmem does not have internal + element converter, so the compute node producing D must have + element_output = type(D) + """ + dependencies = [ + PassLayoutManipulateElimination + ] + def get_producer(self, node, element_D): + node_meta = self.dag_ir.get_node_meta(node) + if node_meta.op == "compute": + node_meta.element_output = element_D + elif node_meta.op == "store": + self.get_producer(self.dag_ir.get_all_inputs(node)[0], element_D) + + def call(self): + if self.dag_ir.has_node("D"): + node_d_meta = self.dag_ir.get_node_meta("D") + element_D = node_d_meta.store_tensor.element + self.get_producer("D", element_D) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/pass_get_impl.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/pass_get_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..90c746071c5bc1f2a1995d8f3e6e78163b6343c3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/pass_get_impl.py @@ -0,0 +1,89 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################################# + +""" +Infer the underlying implement of each node. + +While the frontend only distinguish between Load/Store/Compute Node, +each of these nodes can have different underlying implementation based +on their layout. For instance, a LoadNode can be AuxLoad, Row/Col/Scalar broadcast, etc. 
+This pass infers the underlying impl of each node +""" + +import cutlass.backend.evt.backend as evt_backend +from cutlass.backend.evt.ir import DAGIR, LoadNode +from cutlass.backend.evt.passes.pass_fix_element_d import PassFixElementD +from cutlass.backend.evt.passes.pass_manager import EVTPassBase +from cutlass.backend.evt.passes.pass_no_op_elimination import PassNoOpElimination +from cutlass.backend.evt.passes.pass_shape_type_propagation import PassShapeTypePropagation + + +class PassGetImpl(EVTPassBase): + """ + While the frontend only distinguish between Load/Store/Compute Node, + each of these nodes can have different underlying implementation based + on their layout. For instance, a LoadNode can be AuxLoad, Row/Col/Scalar broadcast, etc. + This pass infers the underlying impl of each node + """ + dependencies = [ + PassShapeTypePropagation, # The shape and type info are required for inference + PassFixElementD + ] + + def __init__(self, dag_ir: DAGIR) -> None: + super().__init__(dag_ir) + self.no_op_elimination = PassNoOpElimination(dag_ir) + + def requires(self) -> None: + # Verify "accum" is in the arg list + if not self.dag_ir.has_node("accum"): + raise SyntaxError("Cannot find 'accum' in the argument list.") + + def call(self): + # The loop structure of the epilogue is determined by the + # accumulator shape + accumulator: LoadNode = self.dag_ir.get_node_meta("accum") + problem_size = accumulator.tensor.shape + + for node_meta in self.dag_ir.node_metas_topological_order(): + node_meta.get_underlying_impl(problem_size) + + def ensures(self) -> None: + # Some nodes will be lowered to NoOp, eliminate them + self.no_op_elimination() + # Lower to cc-specific impl + for node_meta in self.dag_ir.nodes_meta: + node_impl_ccs = getattr(evt_backend, f"sm{self.cc}_nodes") + node_meta.underlying_impl = getattr( + node_impl_ccs, + f"Sm{self.cc}" + node_meta.underlying_impl.__class__.__name__ + )(node_meta) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/pass_layout_elimination.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/pass_layout_elimination.py new file mode 100644 index 0000000000000000000000000000000000000000..435a0cbf34f36d49bffda3f39fecbe9f6473eb18 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/pass_layout_elimination.py @@ -0,0 +1,217 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+#################################################################################################
+
+"""
+Eliminate layout manipulation nodes
+"""
+
+from copy import deepcopy
+
+from cutlass.backend.evt.ir import DAGIR, LayoutNode
+from cutlass.backend.evt.passes.pass_manager import EVTPassBase
+from cutlass.backend.evt.passes.pass_shape_type_propagation import PassShapeTypePropagation
+
+
+class PassLayoutManipulateElimination(EVTPassBase):
+    """
+    Eliminate layout manipulation nodes
+    """
+    dependencies = [PassShapeTypePropagation]
+
+    def __init__(self, dag_ir: DAGIR) -> None:
+        super().__init__(dag_ir)
+        self.copy_cnt = 0
+
+    def call(self):
+        self.layout_nodes_worklist = self.get_all_layout_nodes()
+        # Run the while loop until all layout nodes are eliminated
+        while len(self.layout_nodes_worklist) > 0:
+            node = self.layout_nodes_worklist.pop(0)
+            # Step 1: get the propagation direction
+            direction = self.get_propagation_direction(node)
+            self.visited = []
+            getattr(self, f"propagate_to_{direction}")(self.dag_ir.get_node_meta(node), node)
+            # Eliminate the current node
+            input_node = self.dag_ir.get_all_inputs(node)[0]
+            self.dag_ir.replace_all_uses_with(node, input_node)
+
+    def get_all_layout_nodes(self):
+        layout_nodes = []
+        for node_meta in reversed(self.dag_ir.node_metas_topological_order()):
+            if isinstance(node_meta, LayoutNode):
+                layout_nodes.append(node_meta.name)
+        return layout_nodes
+
+    def get_propagation_direction(self, node: str):
+        """
+        The logic is to propagate all layout nodes away from the accumulator node.
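+
+        For example (hypothetical graph): with accum -> add_0 -> permute_0 -> D,
+        the permute node reaches "accum" only through its inputs, so the layout
+        manipulation is propagated toward its users (here the store node "D"),
+        i.e. this method returns "users".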
+ """ + self.visited = [] + self.get_influenced_users(node) + nodes_influenced_dir_users = self.visited + self.visited = [] + self.get_influenced_inputs(node) + nodes_influenced_dir_inputs = self.visited + + if "accum" in nodes_influenced_dir_users and "accum" not in nodes_influenced_dir_inputs: + return "inputs" + elif "accum" not in nodes_influenced_dir_users and "accum" in nodes_influenced_dir_inputs: + return "users" + else: + raise RuntimeError("Unsolved propagation direction") + + # Get all influenced nodes if we propagate along the user direction + def get_influenced_users(self, node: str): + if node in self.visited: + return + self.visited.append(node) + + users = self.dag_ir.get_users(node) + for user in users: + self.get_influenced_users(user) + user_inputs = [] + for user in users: + user_inputs.append(set(self.dag_ir.get_all_inputs(user))) + if len(user_inputs) > 0: + user_inputs = set.union(*user_inputs) + user_inputs.remove(node) + for input in user_inputs: + self.get_influenced_inputs(input) + + # Get all influenced nodes if we propagate along the input direction + def get_influenced_inputs(self, node: str): + if node in self.visited: + return + self.visited.append(node) + + inputs = self.dag_ir.get_all_inputs(node) + for input in inputs: + self.get_influenced_inputs(input) + input_users = [] + for input in inputs: + input_users.append(set(self.dag_ir.get_users(input))) + if len(input_users) > 0: + input_users = set.union(*input_users) + input_users.remove(node) + for user in input_users: + self.get_influenced_users(user) + + def add_copy_before(self, layout_node_meta: LayoutNode, target: str): + copied_node_meta = deepcopy(layout_node_meta) + copied_node = f"{copied_node_meta.name}_copy{self.copy_cnt}" + self.copy_cnt += 1 + copied_node_meta.name = copied_node + self.dag_ir.add_node(copied_node_meta) + # Add edges + target_inputs = self.dag_ir.get_all_inputs(target) + for src in target_inputs: + self.dag_ir.remove_edge(src, target) + self.dag_ir.add_edge(src, copied_node) + self.dag_ir.add_edge(copied_node, target) + self.layout_nodes_worklist.append(copied_node) + + def add_copy_after(self, layout_node_meta: LayoutNode, target: str): + copied_node_meta = deepcopy(layout_node_meta) + copied_node = f"{copied_node_meta.name}_copy{self.copy_cnt}" + self.copy_cnt += 1 + copied_node_meta.name = copied_node + self.dag_ir.add_node(copied_node_meta) + # Add edges + users = self.dag_ir.get_users(target) + for user in users: + self.dag_ir.remove_edge(target, user) + self.dag_ir.add_edge(copied_node, user) + self.dag_ir.add_edge(target, copied_node) + self.layout_nodes_worklist.append(copied_node) + + # Propagate the layout `node` along the user direction + def propagate_to_users(self, layout_node_meta: LayoutNode, node: str): + """ + Propagate layout node to users + """ + if node in self.visited: + # Avoid applying twice + return + self.visited.append(node) + + node_meta = self.dag_ir.get_node_meta(node) + if layout_node_meta.name != node: + if isinstance(node_meta, LayoutNode): + # Layout node is not transparent with layout node + self.add_copy_before(layout_node_meta, node) + return + else: + layout_node_meta.apply_to_user(node_meta) + + users = self.dag_ir.get_users(node) + user_inputs = [] + for user in users: + user_inputs.append(set(self.dag_ir.get_all_inputs(user))) + for user in users: + self.propagate_to_users(layout_node_meta, user) + if len(user_inputs) > 0: + user_inputs = set.union(*user_inputs) + user_inputs.remove(node) + for input in user_inputs: + 
self.propagate_to_inputs(layout_node_meta.get_inverse_node(), input) + + # Propagate the layout `node` along the input direction + def propagate_to_inputs(self, layout_node_meta: LayoutNode, node: str): + """ + Propagate layout node to inputs + """ + if node in self.visited: + # Avoid applying twice + return + self.visited.append(node) + + node_meta = self.dag_ir.get_node_meta(node) + if layout_node_meta.name != node: + if isinstance(node_meta, LayoutNode): + # Layout node is not transparent with layout node + self.add_copy_after(layout_node_meta, node) + return + else: + layout_node_meta.apply_to_input(node_meta) + inputs = self.dag_ir.get_all_inputs(node) + input_users = [] + for input in inputs: + input_users.append(set(self.dag_ir.get_users(input))) + for input in inputs: + self.propagate_to_inputs(layout_node_meta, input) + if len(input_users) > 0: + input_users = set.union(*input_users) + input_users.remove(node) + for user in input_users: + self.propagate_to_users(layout_node_meta.get_inverse_node(), user) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/pass_manager.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/pass_manager.py new file mode 100644 index 0000000000000000000000000000000000000000..4fa31a8bd229b663bb861c522a374840da270325 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/pass_manager.py @@ -0,0 +1,163 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################################# + +""" +Pass manager for DAG IR. 
+""" + +from typing import Any + +import networkx as nx + +from cutlass.backend.evt.ir import DAGIR + + +class EVTPassBase: + """ + Base class for EVT Passes + """ + dependencies = [] + def __init__(self, dag_ir: DAGIR) -> None: + self.dag_ir = dag_ir + self.cc = self.dag_ir.cc + + def requires(self) -> None: + """ + This function will be called before the pass is run. + """ + pass + + def call(self) -> None: + """ + The pass that is run through the self.dag_ir + """ + raise NotImplementedError( + f"__call__ is not overwritten in Pass {self.__class__.__name__}") + + def ensures(self) -> None: + """ + This function will be called after the pass is run. + """ + pass + + def __call__(self) -> Any: + self.requires() + self.call() + self.ensures() + + def cc_specific_method(self, func): + """ + This enables defining function that behaves differently under different cc + The simplest example of using this function is the following + + .. highlight:: python + .. code-block:: python + + class ExamplePass(EVTPassBase): + + def call(sekf): + # This automatically select the smXX_func based on current cc + self.cc_specific_method(self.func)() + + # Interface func, can be empty + def func(self): + pass + + # Sm90 specific func + def sm90_func(self): + // sm90 specific method + return + + # Sm80 specific func + def sm80_func(self): + // sm80 specific method + return + """ + func_name = f"sm{self.cc}_{func.__name__}" + if hasattr(self, func_name): + return getattr(self, func_name) + else: + raise NotImplementedError(f"func {func.__name__} is not overwritten for Sm{self.cc}") + + +class EVTPassManager(nx.DiGraph): + """ + Topological-based Pass Manager. + Each registered pass has a list of dependencies. The pass manager organizes + the passes as a DAG and launch the compiler passes under topological order. 
+ """ + def __init__(self, dag_ir: DAGIR, pass_list): + super().__init__() + self.dag_ir = dag_ir + for pass_cls in pass_list: + self.add_pass(pass_cls) + + self.sorted_passes = self.schedule() + + def get_callable(self, pass_name): + """ + Return the callable of the pass + """ + return self.nodes[pass_name]["callable"] + + def add_pass(self, pass_cls): + """ + Add a pass to the pass manager + :param pass_cls: the class of pass + :type pass_cls: derived class of EVTPassBase + """ + name = pass_cls.__name__ + pass_callable = pass_cls(self.dag_ir) + self.add_node(name, callable=pass_callable) + + def schedule(self): + """ + Schedule the added passes under topological order + """ + # Add edges + for pass_name in self.nodes: + callable = self.get_callable(pass_name) + for dependency_cls in callable.dependencies: + self.add_edge( + dependency_cls.__name__, + type(callable).__name__) + + # Topological sort + return list(nx.topological_sort(self)) + + def __call__(self) -> Any: + """ + Launch the registered passes + """ + for pass_name in self.sorted_passes: + callable = self.get_callable(pass_name) + callable() diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/pass_no_op_elimination.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/pass_no_op_elimination.py new file mode 100644 index 0000000000000000000000000000000000000000..df63ecfc28743067e4ada847e94aea81935b6e3f --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/pass_no_op_elimination.py @@ -0,0 +1,53 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +################################################################################################# + +""" +No op elimination node +""" + +from typing import Any + +from cutlass.backend.evt.ir import NoOpImpl +from cutlass.backend.evt.passes.pass_manager import EVTPassBase + + +class PassNoOpElimination(EVTPassBase): + """ + The dead node elimination pass removes nodes with NoOpImpl in DAG IR + """ + dependencies = [] + + def call(self) -> Any: + for node in self.dag_ir.nodes_topological_order(): + node_meta = self.dag_ir.get_node_meta(node) + if isinstance(node_meta.underlying_impl, NoOpImpl): + self.dag_ir.replace_all_uses_with(node, self.dag_ir.get_all_inputs(node)[0]) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/pass_preprocess_red.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/pass_preprocess_red.py new file mode 100644 index 0000000000000000000000000000000000000000..afb8a9c46d9fe3e4de9b9da6317a11d8d268ff09 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/pass_preprocess_red.py @@ -0,0 +1,98 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################################# + +""" +Preprocess the reduction nodes. + +The parser treats reduction as Compute(op=(reg_reduce_fn, gmem_reduce_fn)) - Store() +This pass fuses these into a single store node, and then replaces all uses of the +current node with the new store node. 
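# A toy illustration of the rewiring that PassNoOpElimination above performs:
# a node whose underlying implementation is a no-op is bypassed by reconnecting
# its single input directly to all of its users. The graph below is made up.
import networkx as nx

g = nx.DiGraph()
g.add_edges_from([("accum", "noop"), ("noop", "D")])

node = "noop"
(src,) = g.predecessors(node)          # the no-op node's single input
for user in list(g.successors(node)):
    g.add_edge(src, user)              # users now read from the input directly
g.remove_node(node)

print(list(g.edges()))                 # [('accum', 'D')]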
+""" + +from cutlass.backend.evt.ir import ComputeNode, StoreNode +from cutlass.backend.evt.passes.pass_manager import EVTPassBase + + + +class PassPreprocessRed(EVTPassBase): + """ + Preprocess red nodes + """ + + def call(self): + # Step 1: find the compute nodes with op=red + red_compute_nodes = [] + for node_meta in self.dag_ir.nodes_meta: + if isinstance(node_meta, ComputeNode): + if type(node_meta.fn) == tuple: + # To keep the frontend simple, the reduction nodes + # are parsed into compute nodes by default + # The simple heuristic to distinguish between compute + # and reduction node is that compute node is a single function, + # while the reduction node is a tuple of functions for + # in-register reduction and atomic global memory reduction + red_compute_nodes.append(node_meta.name) + + # Step 2: for each compute, merge it with the succeeding store + for node in red_compute_nodes: + # Verify + users = self.dag_ir.get_users(node) + inputs = self.dag_ir.get_all_inputs(node) + # Has a single user + assert len(users) == 1 + assert len(inputs) == 1 + user = users[0] + input = inputs[0] + + user_meta = self.dag_ir.get_node_meta(user) + # Must be a store node + assert isinstance(user_meta, StoreNode) + # With output degree == 0 + assert self.dag_ir.out_degree(user) == 0 + # Register the reduce op + node_meta = self.dag_ir.get_node_meta(node) + user_meta.reg_reduce_fn, user_meta.gmem_reduce_fn = node_meta.fn + user_meta.element_compute = node_meta.element_compute + user_meta.round_style = node_meta.round_style + + # Replace all uses + self.dag_ir.remove_edge(input, node) + input_users = self.dag_ir.get_users(input) + for iu in input_users: + weight = self.dag_ir.get_edge_weight(input, iu) + self.dag_ir.add_edge(user, iu, weight) + self.dag_ir.remove_edge(input, iu) + self.dag_ir.add_edge(input, user) + self.dag_ir.remove_node(node) + + # Register the reduction name + self.dag_ir.reduction_names.append(user) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/pass_shape_type_propagation.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/pass_shape_type_propagation.py new file mode 100644 index 0000000000000000000000000000000000000000..a4f66ced4630e27a18523de9a2cd961c9a9dfe8a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/pass_shape_type_propagation.py @@ -0,0 +1,59 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################################# + +""" +Shape and type propagation pass +""" + +from cutlass.backend.evt.ir.node import NodeBase +from cutlass.backend.evt.passes.pass_manager import EVTPassBase +from cutlass.backend.evt.passes.pass_preprocess_red import PassPreprocessRed + + +class PassShapeTypePropagation(EVTPassBase): + """ + Propagate the shape and type of all nodes + """ + dependencies = [PassPreprocessRed] + + def call(self): + # Propagate the node shape and type + for node in self.dag_ir.nodes_topological_order(): + node_meta: NodeBase = self.dag_ir.get_node_meta(node) + input_node_metas = self.dag_ir.get_all_inputs_meta(node) + node_meta.type_propagation(input_node_metas) + node_meta.shape_propagation(input_node_metas) + + for node in reversed(self.dag_ir.nodes_topological_order()): + node_meta: NodeBase = self.dag_ir.get_node_meta(node) + input_node_metas = self.dag_ir.get_all_inputs_meta(node) + node_meta.broadcast_propagation(input_node_metas) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/smem_size_calculator.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/smem_size_calculator.py new file mode 100644 index 0000000000000000000000000000000000000000..670367d075c1f9ea0c2f367155e21d5311cfb95c --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/evt/passes/smem_size_calculator.py @@ -0,0 +1,200 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
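# A toy version of the forward propagation loop above: visiting nodes in
# topological order, each node's shape is the broadcast of its inputs' shapes.
# The graph and shapes below are made up; the real pass also propagates types
# and then runs a reverse broadcast-propagation sweep.
import numpy as np

shapes = {"A": (128, 1), "B": (1, 64)}
graph = {"C": ("A", "B")}              # C consumes A and B, e.g. C = A + B

for node, inputs in graph.items():     # already in topological order here
    shapes[node] = np.broadcast_shapes(*(shapes[i] for i in inputs))

print(shapes["C"])                     # (128, 64)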
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################################# + +""" +Compute the shared memory size in bytes +""" + +from pycute import shape_div, product + +import cutlass +from cutlass.backend.evt.ir import TopoVisitorNode, DAGIR +from cutlass.backend.library import DataTypeSize + + +class GetSmemSize: + """ + Get the size in byte of shared memory used by the kernel + """ + def __init__(self, dag_ir: DAGIR) -> None: + self.dag_ir = dag_ir + self.cc = self.dag_ir.cc + + # + # Sm90 epilogue specific + # + + def sm90_epilogue_tile(self, tile_description): + # Get the epilogue tile size + schedule = tile_description.epilogue_schedule + if schedule == cutlass.EpilogueScheduleType.TmaWarpSpecialized: + epilogue_tile_mn = (64, 32) + elif schedule == cutlass.EpilogueScheduleType.TmaWarpSpecializedCooperative: + epilogue_tile_mn = (128, 32) + else: + raise NotImplementedError(f"Unsupported schedule: {schedule}") + + # Get the pipeline stages + stages_d = 2 + epi_tiles = product(shape_div(tuple(tile_description.threadblock_shape)[:2], epilogue_tile_mn)) + if self.dag_ir.has_node("C"): + element_c = self.dag_ir.get_node_meta("C").element + else: + element_c = None + + element_d = self.dag_ir.get_node_meta("D").element + if element_c == element_d: + reuse_smem_c = True + else: + reuse_smem_c = False + stages_c = max(epi_tiles, stages_d + 1) if reuse_smem_c else epi_tiles + + # Record the epilogue tile + self.cta_tile_mnk = tuple(tile_description.threadblock_shape) + self.epilogue_tile_mn = epilogue_tile_mn + self.epi_tiles = epi_tiles + self.stages_c = stages_c + self.stages_d = stages_d + self.reuse_smem_c = reuse_smem_c + self.element_c = element_c + self.element_d = element_d + self.is_source_supported = element_c is not None + + def sm90_epilogue_smem_size(self, tile_description): + """ + Compute the shared memory size of sm90 collective epilogue + """ + self.sm90_epilogue_tile(tile_description) + # Get the Fusion Storage + nodes = self.dag_ir.nodes_topological_order() + self.smem_types = {} + for node in nodes: + meta = self.dag_ir.get_node_meta(node) + if not meta.disabled: + self.smem_types[node] = meta.underlying_impl.get_smem_size( + self.cta_tile_mnk, self.epilogue_tile_mn, + self.stages_c, self.stages_d, self.epi_tiles) + if node == "D": + continue + if isinstance(meta, TopoVisitorNode): + self.get_dag_smem_type(node) + else: + self.get_evt_smem_type(node) + + thread_smem_size = self.smem_types[self.dag_ir.get_all_inputs("D")[0]][0] + # Get the Tensor Storage + tensors = [] + if self.is_source_supported: + smem_C = DataTypeSize[self.element_c] * product(self.epilogue_tile_mn) * self.stages_c // 8 + tensors.append((smem_C, 128)) + else: + tensors.append((0, 1)) + if self.reuse_smem_c: + tensors.append((0, 128)) + else: + smem_D = DataTypeSize[self.element_d] * product(self.epilogue_tile_mn) * self.stages_d // 8 + tensors.append((smem_D, 128)) + tensors.append((thread_smem_size, 128)) + + tensor_smem_size = 
self.get_struct_size(tensors) + # Get pipeline storage size + # sizeof(uint64_t * stages_c * 2), alignment of uint64_t + # 2 is for FullBarrier and EmptyBarrier + pipeline_smem_size = (8 * self.stages_c * 2, 8) + + # get SharedStorage size + smem_size = self.get_struct_size([tensor_smem_size, pipeline_smem_size]) + return smem_size[0] + + def __call__(self, tile_description): + return getattr(self, f"sm{self.cc}_epilogue_smem_size")(tile_description) + + # + # Helper functions + # + + @staticmethod + def get_visitor_size(members: list, ebo: bool): + """ + Get the size of struct in bytes + """ + offset = 0 + max_alignment = 1 + if len(members) > 0: + # Get alignment + for _, alignment in members: + max_alignment = max(max_alignment, alignment) + + for type_size, _ in members: + if type_size != 0: + offset = ((offset + max_alignment - 1) // max_alignment) * max_alignment + if type_size == 0 and not ebo: + offset += 1 + else: + offset += type_size + offset = ((offset + max_alignment - 1) // max_alignment) * max_alignment + return (offset, max_alignment) + else: + # Struct size is at least 1 + return (1, 1) + + def get_struct_size(self, members: list): + """ + Get the size of struct in bytes + """ + return self.get_visitor_size(members, False) + + def get_evt_smem_type(self, node): + # Sort the input nodes by edge weight + input_types = [self.smem_types[child] for child in self.dag_ir.get_all_inputs(node)] + input_types.append(self.smem_types[node]) + if len(input_types) > 1: + ebo = len(input_types) > 4 + self.smem_types[node] = self.get_visitor_size(input_types, ebo) + + def get_dag_smem_type(self, node): + meta = self.dag_ir.get_node_meta(node) + subgraph = meta.subgraph + subgraph_nodes = subgraph.nodes_topological_order() + # Visit the unvisited nodes in subgraph + for n in subgraph_nodes: + m = subgraph.get_node_meta(n) + if m.disabled: + continue + else: + self.smem_types[n] = m.underlying_impl.get_smem_size( + self.cta_tile_mnk, self.epilogue_tile_mn, + self.stages_c, self.stages_d, self.epi_tiles) + input_types = [self.smem_types[child] for child in subgraph_nodes[:-1]] + if len(input_types) > 0: + ebo = len(input_types) > 4 + self.smem_types[node] = self.get_visitor_size(input_types, ebo) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/frontend.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/frontend.py new file mode 100644 index 0000000000000000000000000000000000000000..a43dcbb00bd7c3766ae8d5e046d2ce65cd603ee2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/frontend.py @@ -0,0 +1,114 @@ +################################################################################ +# +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. 
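# A standalone re-statement of the struct-size arithmetic in get_visitor_size
# above: members are (size_in_bytes, alignment) pairs, the running offset is
# aligned before each non-empty member, and empty members cost one byte unless
# empty-base optimization (ebo) applies. The member list below is made up.
def struct_size(members, ebo=False):
    if not members:
        return (1, 1)                  # an empty struct still occupies one byte
    max_alignment = max(alignment for _, alignment in members)
    offset = 0
    for size, _ in members:
        if size != 0:
            offset = ((offset + max_alignment - 1) // max_alignment) * max_alignment
            offset += size
        elif not ebo:
            offset += 1
    offset = ((offset + max_alignment - 1) // max_alignment) * max_alignment
    return (offset, max_alignment)

# e.g. a C-tensor buffer, a zero-size reused-C placeholder, and visitor storage
print(struct_size([(4096, 128), (0, 128), (256, 128)]))   # (4480, 128)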
Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################ + +from cuda import cuda +import numpy as np + +from cutlass.backend.memory_manager import device_mem_alloc, todevice +from cutlass.backend.utils.software import CheckPackages + +torch_available = CheckPackages().check_torch() +if torch_available: + import torch + +cupy_available = CheckPackages().check_cupy() +if cupy_available: + import cupy as cp + + +class NumpyFrontend: + """ + Frontend node for numpy + """ + + @staticmethod + def argument(np_tensor: "np.ndarray", is_output: "bool") -> cuda.CUdeviceptr: + """Convert the input numpy tensor to CUDA device pointer + + :param np_tensor: input numpy nd array + :param is_output: whether the tensor is output + + :return: CUDA device pointer + """ + # copy the data to device + if is_output: + return device_mem_alloc(np_tensor.size * np_tensor.itemsize) + else: + return todevice(np_tensor) + + +class TorchFrontend: + """ + Frontend node for torch + """ + + @staticmethod + def argument(torch_tensor: "torch.Tensor") -> cuda.CUdeviceptr: + """Convert the input torch tensor to CUDA device pointer + + :param torch_tensor: input torch tensor + :param is_output: whether the tensor is output + + :return: CUDA device pointer + """ + + # check the device of torch_tensor + if not torch_tensor.is_cuda: + torch_tensor = torch_tensor.to("cuda") + + return cuda.CUdeviceptr(torch_tensor.data_ptr()) + + +class CupyFrontend: + """ + Frontend node for cupy + """ + + @staticmethod + def argument(cupy_ndarray: "cp.ndarray"): + return cuda.CUdeviceptr(int(cupy_ndarray.data.ptr)) + +class TensorFrontend: + """ + Universal Frontend for client-provide tensors + """ + + @staticmethod + def argument(tensor, is_output=False): + if isinstance(tensor, np.ndarray): + return NumpyFrontend.argument(tensor, is_output) + elif torch_available and isinstance(tensor, torch.Tensor): + return TorchFrontend.argument(tensor) + elif cupy_available and isinstance(tensor, cp.ndarray): + return CupyFrontend.argument(tensor) + else: + raise NotImplementedError("Unknown Tensor Type") diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/gemm_operation.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/gemm_operation.py new file mode 100644 index 0000000000000000000000000000000000000000..8bbf402418c9ecc6173bdd0e3a19bdaa8689ecec --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/gemm_operation.py @@ -0,0 +1,2109 @@ 
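# A hedged usage sketch of the frontends defined in frontend.py above:
# TensorFrontend.argument copies a NumPy array to device memory (or merely
# allocates it when is_output=True), while torch/cupy tensors are passed
# through as raw device pointers. Assumes a CUDA-capable environment with the
# cutlass Python package importable; the array shapes here are arbitrary.
import numpy as np

from cutlass.backend.frontend import TensorFrontend

A = np.random.rand(128, 64).astype(np.float32)
D = np.empty((128, 64), dtype=np.float32)

ptr_A = TensorFrontend.argument(A, is_output=False)   # copies A to the device
ptr_D = TensorFrontend.argument(D, is_output=True)    # only allocates device memory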
+################################################################################ +# +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################ + +import copy +import ctypes +import enum + +from cuda import cuda, cudart +import numpy as np +import rmm + +from cutlass import ( + ComplexTransformTag, + DataType, + DataTypeNames, + DataTypeSize, + DataTypeTag, + EpilogueScheduleSuffixes, + EpilogueScheduleTag, + EpilogueScheduleType, + GemmKind, + GemmKindNames, + GemmUniversalMode, + KernelScheduleSuffixes, + KernelScheduleTag, + KernelScheduleType, + LayoutTag, + LayoutType, + MathOperation, + MathOperationTag, + OpcodeClass, + OpcodeClassNames, + OpcodeClassTag, + OperationKind, + ShortComplexLayoutNames, + ShortDataTypeNames, + ShortLayoutTypeNames, + SwizzlingFunctor, + SwizzlingFunctorTag, + TileSchedulerSuffixes, + TileSchedulerTag, + TileSchedulerType, + get_complex_from_real +) +from cutlass.backend.arguments import ArgumentBase +from cutlass.backend.c_types import ( + GemmCoord_, + GemmCoordBatched_, + GenericMainloopArguments3x_, + StrideBatched_, + dim3_, + get_gemm_arguments, + get_gemm_arguments_3x, + get_gemm_arguments_streamk, + get_gemm_grouped_arguments, + get_mainloop_arguments_3x +) +from cutlass.backend.library import ( + ApiVersion, + EmissionType, + SchedulerMode, + SchedulerModeTag, + TensorDescription, + TileDescription, + api_version, +) +from cutlass.backend.memory_manager import device_mem_alloc, todevice +from cutlass.backend.operation import ExecutableOperation, LaunchConfiguration +from cutlass.backend.type_hint import GemmOperation, Tensor +from cutlass.backend.utils.software import ( + CheckPackages, + SubstituteTemplate, + device_sm_count, +) +from cutlass.shape import GemmCoord, MatrixCoord + + +################################################################################ +# +# Data structure modeling a GEMM operation +# 
+################################################################################ + + +def leading_dimension(layout: LayoutType, shape: MatrixCoord) -> int: + """ + Returns the leading dimenson of a tensor with layout ``layout`` and shape ``shape``. + + :param layout: layout of the tensor + :type layout: cutlass.shape.LayoutType + :param shape: shape of the tensor + :type shape: cutlass.shape.MatrixCoord + + :return: leading dimension of the tensor + :rtype: int + """ + if layout == LayoutType.RowMajor: + return shape.column + elif layout == LayoutType.ColumnMajor: + return shape.row + + +def transpose_layout(layout: LayoutType) -> LayoutType: + if layout == LayoutType.ColumnMajor: + return LayoutType.RowMajor + elif layout == LayoutType.RowMajor: + return LayoutType.ColumnMajor + else: + raise ValueError(f"Unsupported Layout {layout}") + + +class GemmArguments2x(ArgumentBase): + """ + Argument wrapper for GEMM in CUTLASS 2. It encodes problem information and + user-provide tensors into the kernel's argument + + :param operation: the GEMM operation to take the argument + :type operation: :class:`cutlass.backend.GemmOperationUniversal` | + :class:`cutlass.backend.GemmOperationGrouped` + + :param problem_size: GEMM problem size gemm(M, N, K) + :type operation: :class:`cutlass.shape.GemmCoord` + + :param A: tensor A + :type A: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray + + :param B: tensor B + :type B: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray + + :param C: tensor C + :type C: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray + + :param D: tensor D + :type D: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray + + :param gemm_mode: GEMM mode + :type gemm_mode: :class:`cutlass.GemmUniversalMode` + + :param output_op: output operator, optional + :type output_op: :class:`cutlass.backend.LinearCombinationFunctorArguments` + """ + + def __init__(self, operation, problem_size, A, B, C, D, gemm_mode=GemmUniversalMode.Gemm, **kwargs): + self.operation = operation + + self.layout_A = operation.A.layout + self.layout_B = operation.B.layout + self.layout_C = operation.C.layout + + self.element_A = operation.A.element + self.element_B = operation.B.element + self.element_C = operation.C.element + + if operation.C.layout in [LayoutType.RowMajorInterleaved32, LayoutType.ColumnMajorInterleaved32]: + raise Exception("Interleaved layout not currently supported") + + if hasattr(self.operation.epilogue_functor, "visitor") and operation.arch != 90: + super().__init__(A, B, None, None, **kwargs) + else: + super().__init__(A, B, C, D, **kwargs) + + if operation.switched: + self.problem_size = GemmCoord(problem_size.n, problem_size.m, problem_size.k) + self.ptr_A, self.ptr_B = self.ptr_B, self.ptr_A + else: + self.problem_size = problem_size + # If the number of elements in C = problem_size.n, C is treated as the bias + if hasattr(self, "tensor_c_numel"): + if self.tensor_c_numel == self.problem_size.n and self.problem_size.m != 1: + self.bias = True + + self.lda = leading_dimension(self.layout_A, self.problem_size.mk) + self.ldb = leading_dimension(self.layout_B, self.problem_size.kn) + self.ldc = leading_dimension(self.layout_C, self.problem_size.mn) + self.ldd = self.ldc + + if self.bias: + self.ldc = 0 + + if "output_op" in kwargs.keys() and gemm_mode != GemmUniversalMode.GemmSplitKParallel: + self.output_op = kwargs["output_op"] + else: + if self.operation.epilogue_functor.element_epilogue in [DataType.s8, DataType.s32, DataType.u8, 
DataType.u32]: + dtype = int + else: + dtype = float + self.output_op = self.operation.epilogue_type(dtype(1.0), dtype(0.0)) + + self.gemm_mode = gemm_mode + if gemm_mode in [GemmUniversalMode.Gemm, GemmUniversalMode.GemmSplitKParallel]: + if "split_k_slices" in kwargs.keys(): + self.batch_count = kwargs["split_k_slices"] + else: + self.batch_count = 1 + self.split_k_slices = self.batch_count + + if gemm_mode in [GemmUniversalMode.Batched, GemmUniversalMode.Array]: + if "batch" in kwargs.keys(): + self.batch_count = kwargs["batch"] + else: + self.batch_count = 1 + + if "batch_strides" in kwargs: + self.batched_stride_A = kwargs["batch_strides"]["A"] + self.batched_stride_B = kwargs["batch_strides"]["B"] + self.batched_stride_C = kwargs["batch_strides"]["C"] + self.batched_stride_D = kwargs["batch_strides"]["D"] + else: + self.batched_stride_A = self.problem_size.m * self.problem_size.k + self.batched_stride_B = self.problem_size.n * self.problem_size.k + self.batched_stride_C = self.problem_size.m * self.problem_size.n + self.batched_stride_D = self.problem_size.m * self.problem_size.n + + if self.bias: + self.batched_stride_C = self.problem_size.n + + if gemm_mode == GemmUniversalMode.Array: + self.ptr_A_array = [] + self.ptr_B_array = [] + self.ptr_C_array = [] + self.ptr_D_array = [] + + ptr_A_addr = int(self.ptr_A) + ptr_B_addr = int(self.ptr_B) + ptr_C_addr = int(self.ptr_C) + ptr_D_addr = int(self.ptr_D) + + stride_A = self.batched_stride_A * DataTypeSize[self.element_A] // 8 + stride_B = self.batched_stride_B * DataTypeSize[self.element_B] // 8 + stride_C = self.batched_stride_C * DataTypeSize[self.element_C] // 8 + stride_D = self.batched_stride_D * DataTypeSize[self.element_C] // 8 + for _ in range(self.batch_count): + self.ptr_A_array.append(ptr_A_addr) + self.ptr_B_array.append(ptr_B_addr) + self.ptr_C_array.append(ptr_C_addr) + self.ptr_D_array.append(ptr_D_addr) + + ptr_A_addr += stride_A + ptr_B_addr += stride_B + ptr_C_addr += stride_C + ptr_D_addr += stride_D + + self.ptr_A_array_buffer = todevice(self.ptr_A_array, dtype=np.int64) + self.ptr_B_array_buffer = todevice(self.ptr_B_array, dtype=np.int64) + self.ptr_C_array_buffer = todevice(self.ptr_C_array, dtype=np.int64) + self.ptr_D_array_buffer = todevice(self.ptr_D_array, dtype=np.int64) + + if isinstance(self.operation, GemmOperationUniversal): + self.initialize() + + def get_arguments(self): + problem_size_ = self.problem_size.ctype + grid_tiled_shape_ = GemmCoord( + self.grid_tiled_shape.x, + self.grid_tiled_shape.y, + self.grid_tiled_shape.z ).ctype + + if self.gemm_mode == GemmUniversalMode.Array: + arguments = self.operation.argument_type( + # Arguments from UniversalArgumentsBase + self.gemm_mode, + problem_size_, + self.batch_count, + 0, + # Remaining arguments + self.output_op, + int(self.ptr_A_array_buffer.ptr), + int(self.ptr_B_array_buffer.ptr), + int(self.ptr_C_array_buffer.ptr), + int(self.ptr_D_array_buffer.ptr), + 0, 0, 0, + self.lda, self.ldb, self.ldc, self.ldd, + self.lda, self.ldb, self.ldc, self.ldd, + 0, 0, 0 + ) + else: + arguments = self.operation.argument_type( + # Arguments from UniversalArgumentsBase + self.gemm_mode, problem_size_, self.batch_count, self.batched_stride_D, + # Remaining arguments + self.output_op, + int(self.ptr_A), + int(self.ptr_B), + int(self.ptr_C), + int(self.ptr_D), + self.batched_stride_A, + self.batched_stride_B, + self.batched_stride_C, + self.lda, self.ldb, self.ldc, self.ldd, + self.lda, self.ldb, self.ldc, self.ldd, + 0, 0, 0 + ) + + self.arguments = arguments, 
grid_tiled_shape_, self.gemm_k_size + + def initialize(self): + launch_config = self.operation.rt_module.plan(self) + + # Get the host and evice workspace + device_workspace_size = self.operation.rt_module.get_device_workspace_size(self) + + if device_workspace_size > 0: + self.workspace_buffer = device_mem_alloc(device_workspace_size) + workspace_ptr = self.workspace_buffer.ptr + err, = cuda.cuMemsetD32( + workspace_ptr, 0, device_workspace_size // 4) + else: + workspace_ptr = None + + device_workspace = 0 + if workspace_ptr is not None and self.gemm_mode == GemmUniversalMode.GemmSplitKParallel: + # In GEMM splik-K parallel, the D pointer is redirected to the workspace + self.ptr_D = cuda.CUdeviceptr(workspace_ptr) + elif workspace_ptr is not None and self.gemm_mode == GemmUniversalMode.Gemm: + device_workspace = workspace_ptr + + self.get_arguments() + + arguments, grid_tiled_shape, gemm_k_size = self.arguments + res_arg = self.operation.rt_module.get_args( + ctypes.byref(arguments), ctypes.c_void_p(int(device_workspace))) + host_workspace = bytearray(res_arg.contents) + + device_workspace = None + + self.host_workspace = host_workspace + self.device_workspace = device_workspace + self.launch_config = launch_config + + def sync(self, stream_sync=True): + super().sync(stream_sync) + if hasattr(self.output_op, "sync"): + self.output_op.sync() + + +class GemmArguments2xStreamK(GemmArguments2x): + """ + Argument wrapper for stream-K GEMMs in CUTLASS 2. It encodes problem information and + user-provide tensors into the kernel's argument + + :param operation: the GEMM operation to take the argument + :type operation: :class:`cutlass.backend.GemmOperationUniversal` | + :class:`cutlass.backend.GemmOperationGrouped` + + :param problem_size: GEMM problem size gemm(M, N, K) + :type operation: :class:`cutlass.shape.GemmCoord` + + :param A: tensor A + :type A: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray + + :param B: tensor B + :type B: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray + + :param C: tensor C + :type C: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray + + :param D: tensor D + :type D: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray + + :param gemm_mode: GEMM mode + :type gemm_mode: :class:`cutlass.GemmUniversalMode` + + :param output_op: output operator, optional + :type output_op: :class:`cutlass.backend.LinearCombinationFunctorArguments` + """ + + def __init__(self, operation, problem_size, A, B, C, D, gemm_mode=GemmUniversalMode.Gemm, **kwargs): + if gemm_mode not in [GemmUniversalMode.Gemm, GemmUniversalMode.Batched]: + raise Exception(f"Unsupported GEMM mode {gemm_mode}.") + + super().__init__(operation, problem_size, A, B, C, D, gemm_mode, **kwargs) + + def get_arguments(self): + batch_stride_A = self.problem_size.m * self.problem_size.k + batch_stride_B = self.problem_size.k * self.problem_size.n + batch_stride_C = self.problem_size.m * self.problem_size.n + batch_stride_D = self.problem_size.m * self.problem_size.n + + arguments = self.operation.argument_type( + self.gemm_mode, + GemmCoord_(self.problem_size.m, self.problem_size.n, self.problem_size.k), + self.batch_count, + self.output_op, + int(self.ptr_A), + int(self.ptr_B), + int(self.ptr_C), + int(self.ptr_D), + batch_stride_A, + batch_stride_B, + batch_stride_C, + batch_stride_D, + self.lda, self.ldb, self.ldc, self.ldd, # strides + self.lda, self.ldb, self.ldc, self.ldd, + -1, # avail_sms + ) + return arguments + + def initialize(self): + # Get the host and 
device workspace + device_workspace_size = self.operation.rt_module.get_device_workspace_size(self) + + device_workspace_size = 10 << 20 + if device_workspace_size > 0: + self.workspace_buffer = device_mem_alloc(device_workspace_size) + workspace_ptr = self.workspace_buffer.ptr + err, = cuda.cuMemsetD32( + workspace_ptr, 0, device_workspace_size // 4) + else: + workspace_ptr = None + + device_workspace = 0 + if workspace_ptr is not None and self.gemm_mode == GemmUniversalMode.GemmSplitKParallel: + # In GEMM splik-K parallel, the D pointer is redirected to the workspace + self.ptr_D = cuda.CUdeviceptr(workspace_ptr) + elif workspace_ptr is not None and self.gemm_mode == GemmUniversalMode.Gemm: + device_workspace = workspace_ptr + + arguments = self.get_arguments() + + res_arg = self.operation.rt_module.get_args( + ctypes.byref(arguments), + ctypes.c_void_p(int(device_workspace)), + device_sm_count(), + self.operation.rt_module.occupancy + ) + host_workspace = bytearray(res_arg.contents) + + grid = self.operation.rt_module.get_grid_shape( + ctypes.byref(arguments), + device_sm_count(), + self.operation.rt_module.occupancy + ) + + device_workspace = None + + self.host_workspace = host_workspace + self.device_workspace = device_workspace + self.launch_config = LaunchConfiguration( + [grid.m, grid.n, grid.k], + [self.operation.rt_module.threads, 1, 1], + self.operation.rt_module.shared_memory_capacity + ) + + +class GemmArguments3x(GemmArguments2x): + """ + Argument wrapper for GEMM in CUTLASS 3. It encodes problem information and + user-provide tensors into the kernel's argument + + :param operation: the GEMM operation to take the argument + :type operation: :class:`cutlass.backend.GemmOperationUniversal` | + :class:`cutlass.backend.GemmOperationGrouped` + + :param problem_size: GEMM problem size gemm(M, N, K) + :type operation: :class:`cutlass.shape.GemmCoord` + + :param A: tensor A + :type A: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray + + :param B: tensor B + :type B: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray + + :param C: tensor C + :type C: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray + + :param D: tensor D + :type D: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray + + :param gemm_mode: GEMM mode + :type gemm_mode: GemmUniversalMode + + :param output_op: output operator, optional + :type output_op: :class:`cutlass.backend.LinearCombinationFunctorArguments` + """ + + def __init__(self, operation, problem_size, A, B, C, D, gemm_mode=GemmUniversalMode.Gemm, **kwargs): + if gemm_mode not in [GemmUniversalMode.Gemm, GemmUniversalMode.Batched]: + raise Exception(f"Unsupported GEMM mode {gemm_mode}.") + + super().__init__(operation, problem_size, A, B, C, D, gemm_mode, **kwargs) + + def get_arguments(self): + problem_size_ = GemmCoordBatched_(self.problem_size, self.batch_count) + + if self.batch_count > 1: + bsA = self.batched_stride_A + bsB = self.batched_stride_B + bsC = self.batched_stride_C + bsD = self.batched_stride_D + else: + bsA = 0 + bsB = 0 + bsC = 0 + bsD = 0 + stride_A = StrideBatched_(self.lda, bsA) + stride_B = StrideBatched_(self.ldb, bsB) + stride_C = StrideBatched_(self.ldc, bsC) + stride_D = StrideBatched_(self.ldd, bsD) + + # Superset of potential mainloop arguments + generic_args = GenericMainloopArguments3x_( + int(self.ptr_A), + stride_A, + int(self.ptr_B), + stride_B, + 4 # mma_promotion_interval + ) + + # Set of mainloop arguments needed for this kernel + mainloop = 
self.operation.rt_module.mainloop_args.from_generic_mainloop_args(generic_args) + + epilogue = self.operation.rt_module.epilogue_args( + self.output_op, + int(self.ptr_C), + stride_C, + int(self.ptr_D), + stride_D, + ) + + # Set hardware info + hw_info = self.operation.rt_module.hw_info(0, device_sm_count()) + + self.arguments = self.operation.argument_type( + int(self.gemm_mode), + problem_size_, + mainloop, + epilogue, + hw_info, + ) + return self.arguments + + def initialize(self): + # Get the host and evice workspace + device_workspace_size = self.operation.rt_module.get_device_workspace_size(self) + + if device_workspace_size > 0: + self.workspace_buffer = device_mem_alloc(device_workspace_size) + workspace_ptr = self.workspace_buffer.ptr + err, = cuda.cuMemsetD32( + workspace_ptr, 0, device_workspace_size // 4) + else: + workspace_ptr = None + + device_workspace = 0 + if workspace_ptr is not None and self.gemm_mode == GemmUniversalMode.GemmSplitKParallel: + # In GEMM splik-K parallel, the D pointer is redirected to the workspace + self.ptr_D = cuda.CUdeviceptr(workspace_ptr) + elif workspace_ptr is not None and self.gemm_mode == GemmUniversalMode.Gemm: + device_workspace = workspace_ptr + + self.get_arguments() + res_arg = self.operation.rt_module.get_args( + ctypes.byref(self.arguments), + ctypes.c_void_p(int(device_workspace)), + ) + host_workspace = bytearray(res_arg.contents) + + grid = self.operation.rt_module.get_grid_shape( + ctypes.byref(self.arguments), + ctypes.c_void_p(int(device_workspace)), + ) + block = self.operation.rt_module.get_block_shape() + + device_workspace = None + + self.host_workspace = host_workspace + self.device_workspace = device_workspace + self.launch_config = LaunchConfiguration( + [grid.x, grid.y, grid.z], + [block.x, block.y, block.z], + self.operation.rt_module.shared_memory_capacity, + ) + + +def GemmArguments(operation, problem_size, A, B, C, D, gemm_mode=GemmUniversalMode.Gemm, **kwargs): + """ + Argument wrapper for GEMM in CUTLASS 2 or 3. It returns either 2x arguments + or 3x arguments depending on the `arch` field specified in `operation`. + + :param operation: the GEMM operation to take the argument + :type operation: :class:`cutlass.backend.GemmOperationUniversal` | + :class:`cutlass.backend.GemmOperationGrouped` + + :param problem_size: GEMM problem size gemm(M, N, K) + :type operation: :class:`cutlass.shape.GemmCoord` + + :param A: tensor A + :type A: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray + + :param B: tensor B + :type B: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray + + :param C: tensor C + :type C: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray + + :param D: tensor D + :type D: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray + + :param gemm_mode: GEMM mode + :type gemm_mode: :class:`cutlass.GemmUniversalMode` + + :param output_op: output operator, optional + :type output_op: :class:`cutlass.backend.LinearCombinationFunctorArguments` + """ + if operation.swizzling_functor == SwizzlingFunctor.StreamK: + if operation.api == ApiVersion.v3x: + raise Exception("Stream K is currently only supported in CUTLASS 2.x") + ArgClass = GemmArguments2xStreamK + else: + ArgClass = GemmArguments3x if operation.api == ApiVersion.v3x else GemmArguments2x + return ArgClass(operation, problem_size, A, B, C, D, gemm_mode, **kwargs) + + +class GemmGroupedArguments: + """ + Argument wrapper for GEMM Grouped. 
It encodes problem information and + user-provide tensors into the kernel's argument + + :param operation: the GEMM Grouped operation to take the argument + :type operation: :class:`cutlass.backend.GemmOperationGrouped` + + :param problem_size: list of GEMM problem size gemm(M, N, K) + :type operation: list[:class:`cutlass.shape.GemmCoord`] + + :param A: list of tensor A + :type A: list[cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray] + + :param B: list of tensor B + :type B: list[cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray] + + :param C: list of tensor C + :type C: list[cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray] + + :param D: list of tensor D + :type D: list[cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray] + + :param output_op: output operator, optional + :type output_op: :class:`cutlass.backend.LinearCombinationFunctorArguments` + """ + + def __init__(self, operation, problem_sizes, A, B, C, D, **kwargs): + # Get number of problems in the group + self.problem_count = len(problem_sizes) + + # Check the input arguments + assert len(A) == self.problem_count + assert len(B) == self.problem_count + assert len(C) == self.problem_count + assert len(D) == self.problem_count + + problem_size_host = [] + self.ptr_A_host = [] + self.ptr_B_host = [] + self.ptr_C_host = [] + self.ptr_D_host = [] + + lda_host = [] + ldb_host = [] + ldc_host = [] + ldd_host = [] + + self.partitions = 1 + + self.operation = operation + + # Get the threadblock + threadblock_shape = operation.tile_description.threadblock_shape + self.threadblock_shape = GemmCoord( + threadblock_shape[0], + threadblock_shape[1], + threadblock_shape[2], + ) + self.threadblock_swizzle = operation.swizzling_functor + + self.total_tiles = 0 + + self.gemm_arguments = [] + + # Process the input arguments + for idx, problem_size in enumerate(problem_sizes): + M, N, K = problem_size.m, problem_size.n, problem_size.k + temp_argument = GemmArguments2x( + operation=operation, + problem_size=GemmCoord(M, N, K), + A=A[idx], B=B[idx], C=C[idx], D=D[idx]) + self.gemm_arguments.append(temp_argument) + + problem_size_host.append( + [temp_argument.problem_size.m, + temp_argument.problem_size.n, + temp_argument.problem_size.k] + ) + + self.ptr_A_host.append(int(temp_argument.ptr_A)) + lda_host.append(temp_argument.lda) + + self.ptr_B_host.append(int(temp_argument.ptr_B)) + ldb_host.append(temp_argument.ldb) + + self.ptr_C_host.append(int(temp_argument.ptr_C)) + ldc_host.append(temp_argument.ldc) + + self.ptr_D_host.append(int(temp_argument.ptr_D)) + ldd_host.append(temp_argument.ldd) + + # Get number of tiles + grid = self.operation.rt_module.get_grid_shape( + self.operation.rt_module.get_tiled_shape( + temp_argument.problem_size.ctype, + self.threadblock_shape.ctype, + temp_argument.batch_count + ) + ) + self.total_tiles += grid.x * grid.y * grid.z + + self.problem_size_buffer = todevice(problem_size_host, np.int32) + self.ptr_A_buffer = todevice(self.ptr_A_host, np.int64) + self.ptr_B_buffer = todevice(self.ptr_B_host, np.int64) + self.ptr_C_buffer = todevice(self.ptr_C_host, np.int64) + self.ptr_D_buffer = todevice(self.ptr_D_host, np.int64) + + self.lda_buffer = todevice(lda_host, np.int64) + self.ldb_buffer = todevice(ldb_host, np.int64) + self.ldc_buffer = todevice(ldc_host, np.int64) + self.ldd_buffer = todevice(ldd_host, np.int64) + + if "output_op" in kwargs.keys(): + self.alpha = kwargs["output_op"].alpha + self.beta = kwargs["output_op"].beta + else: + self.alpha = 1.0 + 
self.beta = 0.0 + + if "output_op" in kwargs.keys(): + self.output_op = kwargs["output_op"] + else: + self.output_op = self.operation.epilogue_type(1.0, 0.0) + + # Get host problem size + self.host_problem_size_ptr = np.array(problem_size_host, dtype=np.int32).__array_interface__["data"][0] + + self.arguments = self.get_arguments() + + self.initialize() + + def get_arguments(self): + return self.operation.argument_type( + self.problem_size_buffer.ptr, + self.problem_count, + self.total_tiles, + self.output_op, + self.ptr_A_buffer.ptr, + self.ptr_B_buffer.ptr, + self.ptr_C_buffer.ptr, + self.ptr_D_buffer.ptr, + self.lda_buffer.ptr, + self.ldb_buffer.ptr, + self.ldc_buffer.ptr, + self.ldd_buffer.ptr, + ctypes.c_void_p(int(self.host_problem_size_ptr)), + ) + + def initialize(self): + # Get launch configuration + launch_config = self.operation.rt_module.plan(self) + + # Get the host and evice workspace + device_workspace_size = self.operation.rt_module.get_device_workspace_size(self) + + if device_workspace_size > 0: + self.workspace_buffer = device_mem_alloc(device_workspace_size) + workspace_ptr = self.workspace_buffer.ptr + err, = cuda.cuMemsetD32( + workspace_ptr, 0, device_workspace_size // 4) + else: + workspace_ptr = None + + if self.operation.precompute_mode == SchedulerMode.Host: + device_workspace_ptr = self.operation.rt_module.host_precompute( + self, self.operation.rt_module.get_workspace_size(self),) + else: + device_workspace_ptr = 0 + + result = self.operation.rt_module.get_args( + ctypes.byref(self.arguments), + self.total_tiles, + ctypes.c_void_p(int(device_workspace_ptr)), + ) + host_workspace = bytearray(result.contents) + + device_workspace = None + + self.host_workspace = host_workspace + self.device_workspace = device_workspace + self.launch_config = launch_config + + def sync(self): + err, = cudart.cudaDeviceSynchronize() + if err != cuda.CUresult.CUDA_SUCCESS: + raise RuntimeError("CUDA Error %s" % str(err)) + for arg in self.gemm_arguments: + arg.sync(stream_sync=False) + + +################################################################################ +# Base class for GEMM runtime module +################################################################################ + + +class GemmRTbase(ExecutableOperation): + """ + GemmRT manages the CUTLASS runtime components + """ + + KernelTemplate = r""" +extern "C" +__global__ void +${operation_name}(${operation_name}${operation_suffix}::Params params) { + + // Dynamic shared memory base pointer + extern __shared__ int SharedStorageBase[]; + + // Declare pointer to dynamic shared memory. 
+ ${operation_name}${operation_suffix}::SharedStorage *shared_storage = + reinterpret_cast<${operation_name}${operation_suffix}::SharedStorage *>(SharedStorageBase); + + ${operation_name}${operation_suffix}::invoke(params, *shared_storage); +} + """ + + def __init__(self, operation: "GemmOperation"): + super().__init__(operation) + + self.operation = operation + threadblock_shape = operation.tile_description.threadblock_shape + self.threadblock_shape = GemmCoord( + threadblock_shape[0], threadblock_shape[1], threadblock_shape[2]) + self.threadblock_swizzle = operation.swizzling_functor + + # Threads per threadblock + self.threads = operation.tile_description.num_threads + + def emit(self): + return self.emitter.emit(self.operation) + + def can_implement(self, configuration, arguments): + raise NotImplementedError() + + def get_host_workspace_size(self, arguments): + raise NotImplementedError() + + def get_device_workspace_size(self, arguments): + return 0 + + def initialize(self): + err, = cuda.cuFuncSetAttribute( + self.kernel, + attrib=cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, + value=self.shared_memory_capacity) + if err != cuda.CUresult.CUDA_SUCCESS: + raise RuntimeError( + f"CUDA error on call to cuFuncSetAttribute: {cuda.cuGetErrorString(err)[1]}" + ) + + +################################################################################ +# Runtime module for GEMM Universal +################################################################################ + + +class GemmRTUniversal(GemmRTbase): + """ + GemmRTUniversal manages the CUTLASS runtime components + """ + + HostTemplate = r""" +extern "C" { + // Get the size of params in bytes + int ${operation_name}_get_param_size(){ + return sizeof(${operation_name}${operation_suffix}::Params); + } + + // Get the size of dynamic shared memory in bytes + int ${operation_name}_shared_memory_size() { + return int(sizeof(${operation_name}${operation_suffix}::SharedStorage)); + } + + // Get the params as byte array + char* ${operation_name}_get_params(${operation_name}_base::Arguments* argument, int* workspace){ + ${operation_name}_base::Params* params; + params = new ${operation_name}_base::Params(*argument, + -1, // SM count. Only used for stream-K + -1 // Occupancy. 
Only used for stream-K + ); + + // Semaphore holds the pointer to the workspace in the Params struct + params->semaphore = workspace; + + char *bytes = ((char*)(params)); + char *output = new char[sizeof(${operation_name}_base::Params)]; + for (unsigned int i = 0; i < sizeof(${operation_name}_base::Params); i ++) + output[i] = bytes[i]; + + return output; + } + + cutlass::gemm::GemmCoord ${operation_name}_get_tiled_shape( + cutlass::gemm::GemmCoord problem_size, cutlass::gemm::GemmCoord tile_size, int split_k_slices) { + return ${operation_name}_base::ThreadblockSwizzle::get_tiled_shape( + problem_size, tile_size, split_k_slices); + } + + dim3 ${operation_name}_get_grid_shape(cutlass::gemm::GemmCoord tiled_shape) { + return ${operation_name}_base::ThreadblockSwizzle::get_grid_shape(tiled_shape); + } +} + """ + + def __init__(self, operation): + super(GemmRTUniversal, self).__init__(operation) + self.extra_funcs = { + "get_tiled_shape": GemmCoord_, + "get_grid_shape": dim3_, + } + self.emitter = EmitGemmUniversalInstance( + "_type", operation.direct_store) + + self.argument_type, self.epilogue_type = get_gemm_arguments(operation.epilogue_functor) + self.argtype = [ + ctypes.POINTER(self.argument_type), + ctypes.POINTER(GemmCoord_), ctypes.c_int, ctypes.c_void_p + ] + + def plan(self, arguments): + grid = self.get_tiled_shape( + arguments.problem_size.ctype, + self.threadblock_shape.ctype, + arguments.batch_count + ) + + gemm_k_size = arguments.problem_size.k + if arguments.gemm_mode in [GemmUniversalMode.Gemm, GemmUniversalMode.GemmSplitKParallel]: + alignk = max(max(128 // DataTypeSize[self.operation.A.element], + 128 // DataTypeSize[self.operation.B.element]), 1) + + gemm_k_size = (((arguments.problem_size.k + arguments.batch_count - 1) // + arguments.batch_count + alignk - 1) // alignk) * alignk + + if gemm_k_size: + grid_z = (arguments.problem_size.k + gemm_k_size - 1) // gemm_k_size + grid = GemmCoord(grid.m, grid.n, grid_z).ctype + + arguments.grid_tiled_shape = dim3_(grid.m, grid.n, grid.k) + grid = self.get_grid_shape(grid) + arguments.gemm_k_size = gemm_k_size + return LaunchConfiguration( + [grid.x, grid.y, grid.z], + [self.threads, 1, 1], + self.shared_memory_capacity) + + def get_device_workspace_size(self, arguments: GemmArguments): + workspace_bytes = 0 + if arguments.gemm_mode == GemmUniversalMode.GemmSplitKParallel: + workspace_bytes = (DataTypeSize[arguments.operation.C.element] + * arguments.batched_stride_D * arguments.grid_tiled_shape.z // 8) + elif (arguments.gemm_mode == GemmUniversalMode.Gemm and + arguments.split_k_slices > 1): + workspace_bytes = 4 * arguments.grid_tiled_shape.x * arguments.grid_tiled_shape.y + + return workspace_bytes + + +class GemmRTUniversalStreamK(GemmRTUniversal): + """ + Manages the CUTLASS runtime components for 2.x stream K kernels + """ + + HostTemplate = r""" +extern "C" { + // Get the size of params in bytes + int ${operation_name}_get_param_size(){ + return sizeof(${operation_name}${operation_suffix}::Params); + } + + // Get the size of dynamic shared memory in bytes + int ${operation_name}_shared_memory_size() { + return int(sizeof(${operation_name}${operation_suffix}::SharedStorage)); + } + + using GemmType = ${operation_name}_base; + + // Get the params as byte array + char* ${operation_name}_get_params(GemmType::Arguments* argument, int* workspace, + int sm_count, int occupancy) { + GemmType::Params* params; + params = new GemmType::Params(*argument, sm_count, occupancy); + + params->init_workspace(workspace); + + char *bytes = 
((char*)(params)); + char *output = new char[sizeof(GemmType::Params)]; + for (unsigned int i = 0; i < sizeof(GemmType::Params); i ++) + output[i] = bytes[i]; + + return output; + } + + dim3 ${operation_name}_get_grid_shape(GemmType::Arguments* args, int device_sms, int sm_occupancy) { + typename GemmType::Params params(*args, device_sms, sm_occupancy); + return params.get_grid_dims(); + } +} + """ + + def __init__(self, operation: "GemmOperation"): + super(GemmRTUniversalStreamK, self).__init__(operation) + self.extra_funcs = { + "get_grid_shape": GemmCoord_, + } + self._occupancy = None + self.argument_type, self.epilogue_type = get_gemm_arguments_streamk(operation.epilogue_functor) + + @property + def occupancy(self): + if self._occupancy is None: + err, self._occupancy = cuda.cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags( + self.kernel, self.threads, self.shared_memory_capacity, + cuda.CUoccupancy_flags.CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE) + + if err != cuda.CUresult.CUDA_SUCCESS: + raise RuntimeError( + "CUDA error on call to cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags: " + f"{cuda.cuGetErrorString(err)[1]}") + return self._occupancy + + +################################################################################ +# Runtime module for GEMM Universal within CUTLASS 3 +################################################################################ + + +class GemmRTUniversal3x(GemmRTUniversal): + """ + Manages the CUTLASS runtime components for 3.x kernels + """ + + KernelTemplate = r""" + +using Operator = ${operation_name}${operation_suffix}; +extern "C" +__global__ __launch_bounds__(Operator::MaxThreadsPerBlock, Operator::MinBlocksPerMultiprocessor) +void ${operation_name}(__grid_constant__ typename Operator::Params const params) { + // Dynamic shared memory base pointer + extern __shared__ char smem[]; + + // Declare pointer to dynamic shared memory. 
+ Operator op; + op(params, smem); +} + """ + HostTemplate = r""" +extern "C" { + // Get the size of params in bytes + int ${operation_name}_get_param_size(){ + return sizeof(${operation_name}${operation_suffix}::Params); + } + + // Get the size of dynamic shared memory in bytes + int ${operation_name}_shared_memory_size() { + return ${operation_name}${operation_suffix}::SharedStorageSize; + } + + using GemmType = ${operation_name}_base; + + // Get the workspace size + uint64_t ${operation_name}_get_kernel_workspace_size(GemmType::Arguments* argument) { + return GemmType::get_workspace_size(*argument); + } + + // Get the params as byte array + char* ${operation_name}_get_params(GemmType::Arguments* argument, int* workspace){ + GemmType::Params params = GemmType::to_underlying_arguments(*argument, workspace); + char *bytes = ((char*)(¶ms)); + char *output = new char[sizeof(GemmType::Params)]; + for (unsigned int i = 0; i < sizeof(GemmType::Params); i ++) + output[i] = bytes[i]; + + return output; + } + + // Get the total number of blocks for a persistent kernel + uint64_t ${operation_name}_get_persistent_tiled_blk_shape_mnl(GemmType::ProblemShape problem) { + auto problem_shape_MNKL = append<4>(problem, Int<1>{}); + auto [problem_blocks_m, problem_blocks_n, problem_blocks_l] = + cutlass::gemm::kernel::detail::PersistentTileSchedulerSm90::get_tiled_cta_shape_mnl( + problem_shape_MNKL, GemmType::TileShape{}, GemmType::DispatchPolicy::ClusterShape{}); + return problem_blocks_m * problem_blocks_n * problem_blocks_l; + } + + // Get the grid shape + dim3 ${operation_name}_get_grid_shape(GemmType::Arguments* args, int* workspace) { + auto tmp_params = GemmType::to_underlying_arguments(*args, workspace); + return GemmType::get_grid_shape(tmp_params); + } + + // Get the block shape + dim3 ${operation_name}_get_block_shape() { + return GemmType::get_block_shape(); + } +} + """ + + def __init__(self, operation): + super(GemmRTUniversal3x, self).__init__(operation) + self.extra_funcs = { + "get_grid_shape": dim3_, + "get_block_shape": dim3_, + "get_persistent_tiled_blk_shape_mnl": ctypes.c_uint64, + "get_kernel_workspace_size": ctypes.c_uint64 + } + self.emitter = EmitGemmUniversalInstance3x("_type") + self.mainloop_args = get_mainloop_arguments_3x( + operation.tile_description.kernel_schedule, + operation.A.element, + operation.B.element, + operation.A.alignment, + operation.B.alignment + ) + self.argument_type, self.epilogue_args, self.epilogue_type, self.hw_info = get_gemm_arguments_3x(self.mainloop_args, operation.epilogue_functor) + + def get_device_workspace_size(self, arguments: GemmArguments3x): + return self.get_kernel_workspace_size(ctypes.byref(arguments.get_arguments())) + + +class EmitGemmUniversalInstance3x: + """Responsible for emitting a CUTLASS 3 template definition""" + + def __init__(self, operation_suffix=""): + self.operation_suffix = operation_suffix + self.includes = [ + "cutlass/cutlass.h", + "cute/tensor.hpp", + "cute/atom/mma_atom.hpp", + "cutlass/numeric_types.h", + "cutlass/gemm/collective/collective_builder.hpp", + "cutlass/gemm/kernel/sm90_tile_scheduler.hpp", + "cutlass/gemm/kernel/gemm_universal.hpp", + "cutlass/epilogue/collective/collective_builder.hpp", + "cutlass/epilogue/collective/default_epilogue.hpp", + "cutlass/epilogue/thread/linear_combination.h" + ] + self.gemm_template_kernel = """ +using namespace cute; + +using CollectiveEpilogue = + typename cutlass::epilogue::collective::CollectiveBuilder< + ${arch}, ${opcode_class}, + cute::Shape, + cute::Shape, + 
cutlass::epilogue::collective::EpilogueTileAuto, + ${element_accumulator}, ${element_epilogue}, + ${element_c}, ${layout_c}, ${align_c}, + ${element_d}, ${layout_d}, ${align_d}, + ${epilogue_schedule} + >::CollectiveOp; + +using CollectiveMainloop = + typename cutlass::gemm::collective::CollectiveBuilder< + ${arch}, ${opcode_class}, + ${element_a}, ${layout_a}, ${align_a}, + ${element_b}, ${layout_b}, ${align_b}, + ${element_accumulator}, + cute::Shape, + cute::Shape, + ${stage_count_type}, + ${kernel_schedule} + >::CollectiveOp; + +// Gemm operator ${operation_name} +using ${operation_name}_base = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue, + ${tile_scheduler} +>; + +// Define named type +struct ${operation_name}${operation_suffix} : + public ${operation_name}_base { }; +""" + self.gemm_template_kernel_visitor = """ +using namespace cute; + +${callback_decl} + +using CollectiveEpilogue = + typename cutlass::epilogue::collective::CollectiveBuilder< + ${arch}, ${opcode_class}, + cute::Shape, + cute::Shape, + cutlass::epilogue::collective::EpilogueTileAuto, + ${element_accumulator}, ${element_epilogue}, + ElementC, StrideC, ${align_c}, + ElementD, StrideD, ${align_d}, + ${epilogue_schedule}, + ${callback_name} + >::CollectiveOp; + +using CollectiveMainloop = + typename cutlass::gemm::collective::CollectiveBuilder< + ${arch}, ${opcode_class}, + ${element_a}, ${layout_a}, ${align_a}, + ${element_b}, ${layout_b}, ${align_b}, + ${element_accumulator}, + cute::Shape, + cute::Shape, + ${stage_count_type}, + ${kernel_schedule} + >::CollectiveOp; + +// Gemm operator ${operation_name} +using ${operation_name}_base = cutlass::gemm::kernel::GemmUniversal< + Shape, + CollectiveMainloop, + CollectiveEpilogue, + ${tile_scheduler} +>; + +// Define named type +struct ${operation_name}${operation_suffix} : + public ${operation_name}_base { }; +""" + + self.gemm_template_device = self.gemm_template_kernel + """ + +// Define device-level operator +using DeviceKernel = cutlass::gemm::device::GemmUniversalAdapter<${operation_name}${operation_suffix}>; +""" + + def emit(self, operation): + # Support built-in epilogue functors or user-defined functions + + if operation.tile_description.stages is None or operation.tile_description.stages == 0: + stage_count_type = "cutlass::gemm::collective::StageCountAutoCarveout" + else: + stage_count_type = "_" + str(operation.tile_description.stages) + + if operation.emission_type == EmissionType.Kernel: + gemm_template = self.gemm_template_kernel + else: + gemm_template = self.gemm_template_device + + kschedule = KernelScheduleType.ScheduleAuto + eschedule = EpilogueScheduleType.ScheduleAuto + tschedule = TileSchedulerType.Default + if operation.tile_description.kernel_schedule is not None: + kschedule = operation.tile_description.kernel_schedule + if operation.tile_description.epilogue_schedule is not None: + eschedule = operation.tile_description.epilogue_schedule + if operation.tile_description.tile_scheduler is not None: + tschedule = operation.tile_description.tile_scheduler + + values = { + "operation_name": operation.procedural_name(), + "operation_suffix": self.operation_suffix, + "element_a": DataTypeTag[operation.A.element], + "layout_a": LayoutTag[operation.A.layout], + "element_b": DataTypeTag[operation.B.element], + "layout_b": LayoutTag[operation.B.layout], + "element_c": DataTypeTag[operation.C.element], + "layout_c": LayoutTag[operation.C.layout], + "element_d": 
DataTypeTag[operation.epilogue_functor.element_output], + "layout_d": LayoutTag[operation.C.layout], + "element_accumulator": DataTypeTag[operation.accumulator_type()], + "element_epilogue": DataTypeTag[operation.epilogue_functor.element_epilogue], + "opcode_class": OpcodeClassTag[operation.tile_description.math_instruction.opcode_class], + "arch": "cutlass::arch::Sm%d" % operation.arch, + "threadblock_shape_m": str(operation.tile_description.threadblock_shape[0]), + "threadblock_shape_n": str(operation.tile_description.threadblock_shape[1]), + "threadblock_shape_k": str(operation.tile_description.threadblock_shape[2]), + "cluster_m": str(operation.tile_description.cluster_shape[0]), + "cluster_n": str(operation.tile_description.cluster_shape[1]), + "cluster_k": str(operation.tile_description.cluster_shape[2]), + "align_a": str(operation.A.alignment), + "align_b": str(operation.B.alignment), + "align_c": str(operation.C.alignment), + "align_d": str(operation.C.alignment), + "stage_count_type": stage_count_type, + "kernel_schedule": KernelScheduleTag[kschedule], + "epilogue_schedule": EpilogueScheduleTag[eschedule], + "tile_scheduler": TileSchedulerTag[tschedule] + } + if hasattr(operation.epilogue_functor, "visitor"): + callback_name, callback_decl = operation.epilogue_functor.emit(operation) + values["callback_name"] = callback_name + values["callback_decl"] = callback_decl + return SubstituteTemplate(self.gemm_template_kernel_visitor, values) + + else: + values["epilogue_functor"] = operation.epilogue_functor.emit() + return SubstituteTemplate(gemm_template, values) + + +################################################################################################### +# Runtime module for GEMM Grouped +################################################################################################### + + +class GemmRTGrouped(GemmRTbase): + """ + GemmRTGrouped manages the CUTLASS runtime components + """ + + KernelTemplate = r""" +extern "C" +__global__ void +${operation_name}(${operation_name}${operation_suffix}::Params params) { + + // Dynamic shared memory base pointer + extern __shared__ int SharedStorageBase[]; + + // Declare pointer to dynamic shared memory. 
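+  // Map the untyped dynamic shared memory buffer onto this kernel's SharedStorage
+  // struct; its size is what the host-side ${operation_name}_shared_memory_size()
+  // helper reports when configuring the launch.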
+ ${operation_name}${operation_suffix}::SharedStorage *shared_storage = + reinterpret_cast<${operation_name}${operation_suffix}::SharedStorage *>(SharedStorageBase); + + ${operation_name}${operation_suffix} op; + + op(params, *shared_storage); +} + """ + + HostTemplate = r""" + extern "C" { + + // precompute scheduling information + char * ${operation_name}_precompute(${operation_name}_base::Arguments const &args, int tile_count, size_t workspace_bytes) { + char* host_workspace = new char[workspace_bytes]; + ${operation_name}_base::ProblemVisitor::host_precompute( + args.host_problem_sizes, + args.problem_count, + args.threadblock_count, + (void*)host_workspace + ); + return host_workspace; + } + + // Get the size of params in bytes + int ${operation_name}_get_param_size(){ + return sizeof(${operation_name}${operation_suffix}::Params); + } + + // Get the size of dynamic shared memory in bytes + int ${operation_name}_shared_memory_size() { + return int(sizeof(${operation_name}${operation_suffix}::SharedStorage)); + } + + // Get the params as byte array + char* ${operation_name}_get_params(${operation_name}_base::Arguments* argument, int tile_count, void* workspace=nullptr){ + ${operation_name}_base::Params* params; + params = new ${operation_name}_base::Params(*argument, workspace, tile_count); + + char *bytes = ((char*)(params)); + char *output = new char[sizeof(${operation_name}_base::Params)]; + for (unsigned int i = 0; i < sizeof(${operation_name}_base::Params); i ++) + output[i] = bytes[i]; + + return output; + } + + cutlass::gemm::GemmCoord ${operation_name}_get_tiled_shape( + cutlass::gemm::GemmCoord problem_size, cutlass::gemm::GemmCoord tile_size, int split_k_slices) { + return ${operation_name}_base::ThreadblockSwizzle::get_tiled_shape( + problem_size, tile_size, split_k_slices); + } + + dim3 ${operation_name}_get_grid_shape(cutlass::gemm::GemmCoord tiled_shape) { + return ${operation_name}_base::ThreadblockSwizzle::get_grid_shape(tiled_shape); + } + } + """ + + def __init__(self, operation: "GemmOperation"): + super(GemmRTGrouped, self).__init__(operation) + self.extra_funcs = { + "precompute": None, + "get_tiled_shape": GemmCoord_, + "get_grid_shape": dim3_, + } + self.emitter = EmitGemmGroupedInstance("_type") + self.argument_type, self.epilogue_type = get_gemm_grouped_arguments(operation.epilogue_functor) + self.argtype = [ctypes.POINTER(self.argument_type), ctypes.c_int, ctypes.c_void_p] + + def host_precompute(self, arguments, workspace_bytes): + self.precompute.argtype = [ + self.argtype[0], ctypes.c_int, ctypes.c_longlong] + self.precompute.restype = ctypes.POINTER(ctypes.c_byte * workspace_bytes) + + problem_info = self.precompute( + ctypes.byref(arguments.arguments), + arguments.total_tiles, + workspace_bytes) + problem_info_array = bytearray(problem_info.contents) + + # copy to device memory + return rmm.DeviceBuffer.to_device(problem_info_array).ptr + + def plan(self, arguments): + return LaunchConfiguration( + [arguments.total_tiles, 1, 1], + [self.threads, 1, 1], + self.shared_memory_capacity, + ) + + def get_workspace_size(self, arguments): + if self.operation.precompute_mode == SchedulerMode.Device: + return 0 + elif self.operation.precompute_mode == SchedulerMode.Host: + total_tiles = arguments.total_tiles + entries_per_block = 1 + return 8 * entries_per_block * total_tiles # three int32_t + + +################################################################################ +# Runtime module for GEMM and grouped GEMM 
+################################################################################ + + +class GemmOperationBase: + """ + CUTLASS GEMM operation + """ + + def __init__( + self, gemm_kind, arch, tile_description: TileDescription, + A: TensorDescription, B: TensorDescription, C: TensorDescription, + epilogue_functor, swizzling_functor=SwizzlingFunctor.Identity1, + api=ApiVersion.v2x, emission_type=EmissionType.Kernel, **kwargs): + self.operation_kind: OperationKind = OperationKind.Gemm + self.arch: int = arch + self.tile_description: TileDescription = tile_description + self.gemm_kind: GemmKind = gemm_kind + + self.api = api + self.prefix = "3x" if self.api == ApiVersion.v3x else "" + self.emission_type = emission_type + + # Optionally swap the TensorDescriptions for operands A and B and transpose their + # layouts. This is needed to mimic the transpose performed by device::GemmUniversal. + # The code below uses deep copy to avoid overwritting the original TensorDescription + self.switched = (self.api != ApiVersion.v3x and + self.emission_type == EmissionType.Kernel and + C.layout == LayoutType.ColumnMajor) + + self.A, self.B, self.C = GemmOperationBase.get_operands(A, B, C, self.switched) + + self.epilogue_functor = epilogue_functor + self.swizzling_functor = swizzling_functor + + if "direct_store" in kwargs: + self.direct_store = kwargs["direct_store"] + else: + self.direct_store = False + + @staticmethod + def get_operands(A: TensorDescription, B: TensorDescription, C: TensorDescription, swap: bool): + """ + Makes copies of A, B, and C, and possibly transposes their order. If ``swap`` is set, + A and B are swapped, and the layout of A, B, and C are transposed. + + :param A: description of operand A + :type A: TensorDescription + :param B: description of operand B + :type B: TensorDescription + :param C: description of operand C + :type C: TensorDescription + + :return: descriptions of operands A, B, and C + :rtype: tuple[TileDescription] + """ + if swap: + A_out = copy.deepcopy(B) + B_out = copy.deepcopy(A) + C_out = copy.deepcopy(C) + A_out.layout = transpose_layout(A_out.layout) + B_out.layout = transpose_layout(B_out.layout) + C_out.layout = transpose_layout(C_out.layout) + else: + A_out = copy.deepcopy(A) + B_out = copy.deepcopy(B) + C_out = copy.deepcopy(C) + return A_out, B_out, C_out + + def run(self, arguments: GemmArguments) -> cuda.CUresult: + """ + Configure and launch the cuda kernel with input arguments + """ + if self.emission_type == EmissionType.Device: + raise Exception('Running a kernel via PyCUTLASS is only enabled with emission type "Kernel"') + + err = self.rt_module.run( + arguments.host_workspace, + arguments.device_workspace, + arguments.launch_config, + ) + + if err != cuda.CUresult.CUDA_SUCCESS: + raise RuntimeError("CUDA Error %s" % str(err)) + + return err + + def free(self): + if hasattr(self, "workspace_buffer"): + del self.workspace_buffer + + def is_complex(self): + complex_operators = [ + MathOperation.multiply_add_complex, + MathOperation.multiply_add_complex_gaussian, + MathOperation.multiply_add_complex_fast_f32, + ] + return self.tile_description.math_instruction.math_operation in complex_operators + + def is_planar_complex(self): + return self.gemm_kind in (GemmKind.PlanarComplex, GemmKind.PlanarComplexArray) + + def accumulator_type(self): + accum = self.tile_description.math_instruction.element_accumulator + + if self.is_complex(): + return get_complex_from_real(accum) + + return accum + + def short_math_name(self): + if 
self.tile_description.math_instruction.math_operation == MathOperation.multiply_add_complex_gaussian: + return "g%s" % ShortDataTypeNames[self.accumulator_type()] + return ShortDataTypeNames[self.accumulator_type()] + + def core_name(self): + """The basic operation kind is prefixed with a letter indicating the accumulation type.""" + + inst_shape = "" + inst_operation = "" + intermediate_type = "" + + math_operations_map = { + MathOperation.xor_popc: "xor", + } + + if (self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp or + self.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp): + math_op = self.tile_description.math_instruction.math_operation + math_op_string = math_operations_map[math_op] if math_op in math_operations_map.keys() else "" + + if self.tile_description.math_instruction.instruction_shape is not None: + if self.api == ApiVersion.v3x and self.arch >= 90: + inst_shape = "%dx%dx%d" % tuple( + self.tile_description.math_instruction.instruction_shape) + else: + inst_shape = "%d%d%d" % tuple( + self.tile_description.math_instruction.instruction_shape) + else: + inst_shape = "Default" + inst_shape += math_op_string + + if (self.tile_description.math_instruction.element_a != self.A.element and + self.tile_description.math_instruction.element_a != self.tile_description.math_instruction.element_accumulator): + intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a] + + return "%s%s%s%s" % (self.short_math_name(), inst_shape, intermediate_type, GemmKindNames[self.gemm_kind]) + + def extended_name(self): + """Append data types if they differ from compute type.""" + if self.is_complex(): + extended_name = "${core_name}" + else: + if (self.C.element != self.tile_description.math_instruction.element_accumulator and + self.A.element != self.tile_description.math_instruction.element_accumulator): + extended_name = "${element_c}_${core_name}_${element_a}" + elif (self.C.element == self.tile_description.math_instruction.element_accumulator and + self.A.element != self.tile_description.math_instruction.element_accumulator): + extended_name = "${core_name}_${element_a}" + else: + extended_name = "${core_name}" + + extended_name = SubstituteTemplate(extended_name, { + "element_a": DataTypeNames[self.A.element], + "element_c": DataTypeNames[self.C.element], + "core_name": self.core_name(), + }) + + return extended_name + + def extended_name_3x(self): + """Generates a string representing the MMA atom. Assumes accumulator type is C type.""" + extended_name = "{core_name}_{element_a}_{element_b}_{element_acc}_{element_c}_{element_d}".format( + element_a=DataTypeNames[self.A.element], + element_b=DataTypeNames[self.B.element], + element_acc=DataTypeNames[self.tile_description.math_instruction.element_accumulator], + element_c=DataTypeNames[self.C.element], + element_d=DataTypeNames[self.C.element], + core_name=self.core_name()) + return extended_name + + def layout_name(self): + if self.is_complex() or self.is_planar_complex(): + return "%s%s" % ( + ShortComplexLayoutNames[(self.A.layout, self.A.complex_transform)], + ShortComplexLayoutNames[(self.B.layout, self.B.complex_transform)] + ) + return "%s%s" % (ShortLayoutTypeNames[self.A.layout], ShortLayoutTypeNames[self.B.layout]) + + # Generates a short string representing the ABC layout tags (e.g. 
ntn or tnn) + def layout_name_3x(self): + if self.is_complex() or self.is_planar_complex(): + return "{}{}{}".format( + ShortComplexLayoutNames[(self.A.layout, self.A.complex_transform)], + ShortComplexLayoutNames[(self.B.layout, self.B.complex_transform)], + ShortComplexLayoutNames[(self.C.layout, self.C.complex_transform)]) + else: + return "{}{}{}".format( + ShortLayoutTypeNames[self.A.layout], + ShortLayoutTypeNames[self.B.layout], + ShortLayoutTypeNames[self.C.layout]) + + # Generates a short string representing underlying kernel schedule type + def kernel_schedule_name_3x(self): + if self.tile_description.kernel_schedule is None: + return KernelScheduleSuffixes[KernelScheduleType.ScheduleAuto] + else: + return KernelScheduleSuffixes[self.tile_description.kernel_schedule] + + # Generates a short string representing underlying epilogue schedule type + def epilogue_schedule_name_3x(self): + if self.tile_description.epilogue_schedule is None: + return EpilogueScheduleSuffixes[EpilogueScheduleType.ScheduleAuto] + else: + return EpilogueScheduleSuffixes[self.tile_description.epilogue_schedule] + + def procedural_name(self): + """The full procedural name indicates architecture, extended name, tile size, and layout.""" + opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class] + if self.api == ApiVersion.v3x and self.arch >= 90: + kernel_name_template = "cutlass{p}_sm{ar}_{op}_{ex}_{tbm}x{tbn}x{tbk}_{cm}x{cn}x{ck}_{l}_{s}_align{al}{k}{e}" + return kernel_name_template.format( + p=self.prefix, + ar=self.arch, + op=opcode_class_name, + ex=self.extended_name_3x(), + tbm=self.tile_description.threadblock_shape[0], + tbn=self.tile_description.threadblock_shape[1], + tbk=self.tile_description.threadblock_shape[2], + cm=self.tile_description.cluster_shape[0], + cn=self.tile_description.cluster_shape[1], + ck=self.tile_description.cluster_shape[2], + l=self.tile_description.stages, + s=self.layout_name_3x(), + al=str(self.A.alignment), + k=self.kernel_schedule_name_3x(), + e=self.epilogue_schedule_name_3x() + ) + else: + threadblock = self.tile_description.procedural_name_2x() + return "cutlass{p}_{op}_{ex}_{tb}_{l}_align{a}".format( + p=self.prefix, + op=opcode_class_name, + ex=self.extended_name(), + tb=threadblock, + l=self.layout_name(), + a=str(self.A.alignment) + ) + + def configuration_name(self): + """The full procedural name indicates architecture, extended name, tile size, and layout.""" + return self.procedural_name() + + +class GemmOperationUniversal(GemmOperationBase): + def __init__(self, arch, tile_description: TileDescription, A: TensorDescription, B, C, + epilogue_functor, swizzling_functor=SwizzlingFunctor.Identity1, **kwargs): + api = api_version(arch, tile_description.math_instruction.opcode_class, A.element) + super(GemmOperationUniversal, self).__init__(GemmKind.Universal, arch, tile_description, + A, B, C, epilogue_functor, swizzling_functor, + api=api, **kwargs, ) + if api == ApiVersion.v3x: + if swizzling_functor == SwizzlingFunctor.StreamK: + raise Exception("Stream K swizzle functor is currently only supported for CUTLASS 2.x kernels") + self.rt_module = GemmRTUniversal3x(self) + else: + if swizzling_functor == SwizzlingFunctor.StreamK: + self.rt_module = GemmRTUniversalStreamK(self) + else: + self.rt_module = GemmRTUniversal(self) + self.argument_type = self.rt_module.argument_type + self.epilogue_type = self.rt_module.epilogue_type + + def device_op(self): + """ + Returns a new GemmOperationUniversal object that is constructed with 
emission type + ``EmissionType.Device``. Since the device-emitted kernel does not require swapping, + any swappng performed by the kernel-emitted operation is reversed. + + :return: operation ready for device-level code emission + :rtype: GemmUniversalOperation + """ + A, B, C = GemmOperationBase.get_operands(self.A, self.B, self.C, self.switched) + return GemmOperationUniversal(self.arch, self.tile_description, A, B, C, + self.epilogue_functor, self.swizzling_functor, + emission_type=EmissionType.Device, direct_store=self.direct_store) + + +class GemmOperationGrouped(GemmOperationBase): + def __init__(self, arch, tile_description: TileDescription, A: TensorDescription, B, C, + epilogue_functor, swizzling_functor=SwizzlingFunctor.Identity1, **kwargs): + super(GemmOperationGrouped, self).__init__(GemmKind.Grouped, arch, tile_description, + A, B, C, epilogue_functor, swizzling_functor, **kwargs) + assert "precompute_mode" in kwargs.keys(), "missing keyword arguement 'precompute_mode'." + self.precompute_mode = kwargs["precompute_mode"] + self.rt_module = GemmRTGrouped(self) + self.argument_type = self.rt_module.argument_type + self.epilogue_type = self.rt_module.epilogue_type + + def device_op(self): + """ + Returns a new GemmOperationGrouped object that is constructed with emission type + ``EmissionType.Device``. Since the device-emitted kernel does not require swapping, + any swappng performed by the kernel-emitted operation is reversed. + + :return: operation ready for device-level code emission + :rtype: GemmOperationGrouped + """ + A, B, C = GemmOperationBase.get_operands(self.A, self.B, self.C, self.switched) + return GemmOperationGrouped( + self.arch, self.tile_description, A, B, C, self.epilogue_functor, + self.swizzling_functor, emission_type=EmissionType.Device, + direct_store=self.direct_store, precompute_mode=self.precompute_mode, ) + + +################################################################################################### +# +# Emits single instances of a CUTLASS device-wide operator +# +################################################################################################### + + +class EmitGemmUniversalInstance: + """Responsible for emitting a CUTLASS template definition""" + + def __init__( + self, + operation_suffix="", + direct_store=False + ): + self.operation_suffix = operation_suffix + self.direct_store = direct_store + self.includes = [ + "cutlass/cutlass.h", + "cutlass/gemm_coord.h", + "cutlass/numeric_types.h", + "cutlass/arch/arch.h", + "cutlass/arch/mma.h", + "cutlass/layout/matrix.h", + "cutlass/gemm/device/gemm.h", + "cutlass/gemm/device/gemm_universal_adapter.h", + "cutlass/gemm/kernel/default_gemm_universal.h", + ] + if self.direct_store: + self.includes.append( + "cutlass/epilogue/threadblock/default_epilogue_direct_store.h" + ) + self.gemm_template_kernel = """ +// Gemm operator ${operation_name} +using ${operation_name}_base = + typename cutlass::gemm::kernel::DefaultGemmUniversal< + ${element_a}, ${layout_a}, ${transform_a}, ${align_a}, + ${element_b}, ${layout_b}, ${transform_b}, ${align_b}, + ${element_c}, ${layout_c}, + ${element_accumulator}, + ${opcode_class}, + ${arch}, + cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, + cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>, + cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, + ${epilogue_functor}, + ${swizzling_functor}, + ${stages}, + ${math_operation} 
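+    // See the device-level GemmUniversal template below for inline documentation of
+    // these template parameters.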
+>::GemmKernel; + +// Define named type +struct ${operation_name}${operation_suffix} : + public ${operation_name}_base { }; +""" + + self.gemm_template_device = """ +// Gemm operator ${operation_name} +using DeviceKernel = + typename cutlass::gemm::device::GemmUniversal< + // Data type and layout of operand A + ${element_a}, ${layout_a}, + // Data type and layout of operand B + ${element_b}, ${layout_b}, + // Data type and layout of operand C + ${element_c}, ${layout_c}, + // Data type of accumulator + ${element_accumulator}, + // Class of operation + ${opcode_class}, + // Compute capability of the target kernel + ${arch}, + // Threadblock tile shape + cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, + // Warp tile shape + cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>, + // Instruction shape + cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, + // Epilogue functor + ${epilogue_functor}, + // Swizzling function + ${swizzling_functor}, + // Number of pipeline stages + ${stages}, + // Alignment of operands A and B + ${align_a}, ${align_b}, + // Type of math operation + ${math_operation}, + // Complex transform types of operands A and B + ${transform_a}, ${transform_b} + >; +""" + self.gemm_template_direct_store = """ +// Gemm operator ${operation_name} +using ${operation_name}_default = + typename cutlass::gemm::kernel::DefaultGemmUniversal< + ${element_a}, ${layout_a}, ${transform_a}, ${align_a}, + ${element_b}, ${layout_b}, ${transform_b}, ${align_b}, + ${element_c}, ${layout_c}, + ${element_accumulator}, + ${opcode_class}, + ${arch}, + cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, + cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>, + cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, + ${epilogue_functor}, + ${swizzling_functor}, + ${stages}, + ${math_operation} +>::GemmKernel; + +using ${operation_name}_base = + cutlass::gemm::kernel::GemmUniversal< + ${operation_name}_default::Mma, + cutlass::epilogue::threadblock::DefaultEpilogueDirectStore< + ${operation_name}_default::Epilogue + >::Epilogue, + ${operation_name}_default::ThreadblockSwizzle + >; + +// Define named type +struct ${operation_name}${operation_suffix} : + public ${operation_name}_base { }; +""" + self.gemm_template_kernel_visitor = """ + +using OutputTileThreadMap = cutlass::epilogue::threadblock::OutputTileThreadLayout< + cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, + cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>, + ${element_c}, + ${align_c}, + ${epilogue_stages} /* epilogue stages */ +>; + +${callback_decl} + +// Gemm operator ${operation_name} +using ${operation_name}_base = + typename cutlass::gemm::kernel::DefaultGemmWithVisitor< + ${element_a}, ${layout_a}, ${transform_a}, ${align_a}, + ${element_b}, ${layout_b}, ${transform_b}, ${align_b}, + ${element_c}, ${layout_c}, ${align_c}, + ${element_accumulator}, + ${element_epilogue}, + ${opcode_class}, + ${arch}, + cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, + cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>, + cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, + ${callback_name}, + ${swizzling_functor}, + ${stages}, 
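+    // ${callback_name} above is the epilogue-visitor callback type emitted by the
+    // epilogue functor; it takes the place of the plain epilogue functor used in
+    // the non-visitor kernel template.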
+ ${math_operation}, + ${epilogue_stages} /* epilogue stages */ +>::GemmKernel; + +// Define named type +struct ${operation_name}${operation_suffix} : + public ${operation_name}_base { }; +""" + + def instance_template(self): + return """ +${compile_guard_start} + manifest.append(new ${gemm_kind}< + cutlass::gemm::device::GemmUniversalAdapter<${operation_name}> + >("${operation_name}")); +${compile_guard_end} +""" + + def emit(self, operation): + threadblock_shape = operation.tile_description.threadblock_shape + warp_count = operation.tile_description.warp_count + + warp_shape = [threadblock_shape[idx] // warp_count[idx] for idx in range(3)] + + instance_layout_A, instance_layout_B, instance_layout_C = \ + (operation.A.layout, operation.B.layout, operation.C.layout) + + if operation.emission_type == EmissionType.Kernel: + if self.direct_store: + gemm_template = self.gemm_template_direct_store + else: + gemm_template = self.gemm_template_kernel + else: + gemm_template = self.gemm_template_device + + values = { + "operation_name": operation.procedural_name(), + "operation_suffix": self.operation_suffix, + "element_a": DataTypeTag[operation.A.element], + "layout_a": LayoutTag[instance_layout_A], + "element_b": DataTypeTag[operation.B.element], + "layout_b": LayoutTag[instance_layout_B], + "element_c": DataTypeTag[operation.C.element], + "layout_c": LayoutTag[instance_layout_C], + "element_accumulator": DataTypeTag[operation.accumulator_type()], + "opcode_class": OpcodeClassTag[operation.tile_description.math_instruction.opcode_class], + "arch": "cutlass::arch::Sm%d" % operation.arch, + "threadblock_shape_m": str(operation.tile_description.threadblock_shape[0]), + "threadblock_shape_n": str(operation.tile_description.threadblock_shape[1]), + "threadblock_shape_k": str(operation.tile_description.threadblock_shape[2]), + "warp_shape_m": str(warp_shape[0]), + "warp_shape_n": str(warp_shape[1]), + "warp_shape_k": str(warp_shape[2]), + "instruction_shape_m": str(operation.tile_description.math_instruction.instruction_shape[0]), + "instruction_shape_n": str(operation.tile_description.math_instruction.instruction_shape[1]), + "instruction_shape_k": str(operation.tile_description.math_instruction.instruction_shape[2]), + "swizzling_functor": SwizzlingFunctorTag[operation.swizzling_functor], + "stages": str(operation.tile_description.stages), + "align_a": str(operation.A.alignment), + "align_b": str(operation.B.alignment), + "transform_a": ComplexTransformTag[operation.A.complex_transform], + "transform_b": ComplexTransformTag[operation.B.complex_transform], + "math_operation": MathOperationTag[operation.tile_description.math_instruction.math_operation], + } + + if hasattr(operation.epilogue_functor, "visitor"): + self.includes += [ + "cutlass/epilogue/threadblock/fusion/visitors.hpp", + "cutlass/gemm/kernel/default_gemm_universal_with_visitor.h" + ] + callback_name, callback_decl = operation.epilogue_functor.emit(operation) + values["callback_name"] = callback_name + values["callback_decl"] = callback_decl + values["align_c"] = str(operation.C.alignment) + values["element_epilogue"] = DataTypeTag[operation.epilogue_functor.element_epilogue] + if hasattr(operation.epilogue_functor, "epilogue_stages"): + epilogue_stages = operation.epilogue_functor.epilogue_stages + else: + epilogue_stages = 1 + values["epilogue_stages"] = str(epilogue_stages) + return SubstituteTemplate(self.gemm_template_kernel_visitor, values) + else: + values["epilogue_functor"] = operation.epilogue_functor.emit() + return 
SubstituteTemplate(gemm_template, values) + + +class EmitGemmGroupedInstance: + """Responsible for emitting a CUTLASS template definition""" + + def __init__(self, operation_suffix=""): + self.operation_suffix = operation_suffix + self.includes = [ + "cutlass/cutlass.h", + "cutlass/numeric_types.h", + "cutlass/arch/arch.h", + "cutlass/arch/mma.h", + "cutlass/layout/matrix.h", + "cutlass/gemm/kernel/gemm_grouped.h", + "cutlass/gemm/kernel/default_gemm_grouped.h", + ] + self.gemm_template_kernel = """ +// Gemm operator ${operation_name} +using ${operation_name}_base = + typename cutlass::gemm::kernel::DefaultGemmGrouped< + ${element_a}, ${layout_a}, ${transform_a}, ${align_a}, + ${element_b}, ${layout_b}, ${transform_b}, ${align_b}, + ${element_c}, ${layout_c}, + ${element_accumulator}, + ${opcode_class}, + ${arch}, + cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, + cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>, + cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, + ${epilogue_functor}, + ${swizzling_functor}, + ${stages}, + ${precompute_mode}, + ${math_operation} +>::GemmKernel; + +// Define named type +struct ${operation_name}${operation_suffix} : + public ${operation_name}_base { }; +""" + self.gemm_template_device = ( + self.gemm_template_kernel + + """ +using DeviceKernel = cutlass::gemm::device::GemmGrouped<${operation_name}_base>; +""" + ) + + def instance_template(self): + return """ +${compile_guard_start} + manifest.append(new ${gemm_kind}< + cutlass::gemm::device::GemmGrouped<${operation_name}> + >("${operation_name}")); +${compile_guard_end} +""" + + def emit(self, operation): + threadblock_shape = operation.tile_description.threadblock_shape + warp_count = operation.tile_description.warp_count + + warp_shape = [threadblock_shape[idx] // warp_count[idx] for idx in range(3)] + + instance_layout_A, instance_layout_B, instance_layout_C = \ + (operation.A.layout, operation.B.layout, operation.C.layout) + + # Support built-in epilogue functors or user-defined functions + epilogue_functor = operation.epilogue_functor.emit() + + values = { + "operation_name": operation.procedural_name(), + "operation_suffix": self.operation_suffix, + "element_a": DataTypeTag[operation.A.element], + "layout_a": LayoutTag[instance_layout_A], + "element_b": DataTypeTag[operation.B.element], + "layout_b": LayoutTag[instance_layout_B], + "element_c": DataTypeTag[operation.C.element], + "layout_c": LayoutTag[instance_layout_C], + "element_accumulator": DataTypeTag[operation.accumulator_type()], + "opcode_class": OpcodeClassTag[operation.tile_description.math_instruction.opcode_class], + "arch": "cutlass::arch::Sm%d" % operation.arch, + "threadblock_shape_m": str(operation.tile_description.threadblock_shape[0]), + "threadblock_shape_n": str(operation.tile_description.threadblock_shape[1]), + "threadblock_shape_k": str(operation.tile_description.threadblock_shape[2]), + "warp_shape_m": str(warp_shape[0]), + "warp_shape_n": str(warp_shape[1]), + "warp_shape_k": str(warp_shape[2]), + "instruction_shape_m": str(operation.tile_description.math_instruction.instruction_shape[0]), + "instruction_shape_n": str(operation.tile_description.math_instruction.instruction_shape[1]), + "instruction_shape_k": str(operation.tile_description.math_instruction.instruction_shape[2]), + "epilogue_functor": epilogue_functor, + "swizzling_functor": SwizzlingFunctorTag[operation.swizzling_functor], + "stages": 
str(operation.tile_description.stages), + "align_a": str(operation.A.alignment), + "align_b": str(operation.B.alignment), + "transform_a": ComplexTransformTag[operation.A.complex_transform], + "transform_b": ComplexTransformTag[operation.B.complex_transform], + "precompute_mode": SchedulerModeTag[operation.precompute_mode], + "math_operation": MathOperationTag[operation.tile_description.math_instruction.math_operation], + } + + if operation.emission_type == EmissionType.Kernel: + gemm_template = self.gemm_template_kernel + else: + gemm_template = self.gemm_template_device + + return SubstituteTemplate(gemm_template, values) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/library.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/library.py new file mode 100644 index 0000000000000000000000000000000000000000..62939a521c3329437160f3306a86de441de3c41c --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/library.py @@ -0,0 +1,497 @@ +################################################################################################# +# +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################################# + +""" +Common data types and string names/tags for them +""" + +import enum + +from cutlass import ( + ComplexTransform, + DataType, + DataTypeSize, + EpilogueScheduleType, + KernelScheduleType, + MathOperation, + OpcodeClass, + TileSchedulerType +) + + +# The following block implements enum.auto() for Python 3.5 variants that don't include it such +# as the default 3.5.2 on Ubuntu 16.04. 
+# +# https://codereview.stackexchange.com/questions/177309/reimplementing-pythons-enum-auto-for-compatibility + +try: + from enum import auto as enum_auto +except ImportError: + __cutlass_library_auto_enum = 0 + + def enum_auto() -> int: + global __cutlass_library_auto_enum + i = __cutlass_library_auto_enum + __cutlass_library_auto_enum += 1 + return i + + +class DataTypeSizeBytes: + """ + Static class to mimic the `DataTypeSize` dictionary, but with checks for whether the + data type key is less than a full byte or a non-integer number of bytes. + """ + + @staticmethod + def __class_getitem__(datatype): + """ + Returns the size of the data type in bytes. Raises an exception if the data type + is either less than a full byte or a non-integer number of bytes in size. + + :param datatype: data type to query + + :return: number of bytes the data type occupies + :rtype: int + """ + bits = DataTypeSize[datatype] + if bits < 8: + raise Exception( + f"Data type {datatype} is less than one byte in size." + ) + elif bits % 8 != 0: + raise Exception( + f"Data type {datatype} is not an integer number of bytes." + ) + return bits // 8 + + +SharedMemPerCC = { + 70: 96 << 10, # 96KB of SMEM + 72: 96 << 10, # 96KB of SMEM + 75: 64 << 10, # 64KB of SMEM + 80: 160 << 10, # 164KB of SMEM - 4KB reserved for the driver + 86: 100 << 10, # 100KB of SMEM + 87: 160 << 10, # 164KB of SMEM - 4KB reserved for the driver + 89: 100 << 10, # 100KB of SMEM + 90: 227 << 10, # 228KB of SMEM - 1KB reserved for the driver +} + + +class SchedulerMode(enum.Enum): + Device = enum_auto() + Host = enum_auto() + + +SchedulerModeTag = { + SchedulerMode.Device: "cutlass::gemm::kernel::GroupScheduleMode::kDeviceOnly", + SchedulerMode.Host: "cutlass::gemm::kernel::GroupScheduleMode::kHostPrecompute", +} + + +ShortSchedulerModeNames = {SchedulerMode.Device: "Device", SchedulerMode.Host: "Host"} + + +class FunctionalOp(enum.Enum): + AtomicAdd = enum_auto() + AtomicMaximum = enum_auto() + Divides = enum_auto() + Maximum = enum_auto() + Minimum = enum_auto() + Minus = enum_auto() + Multiplies = enum_auto() + MultiplyAdd = enum_auto() + Plus = enum_auto() + + +FunctionalOpTag = { + FunctionalOp.AtomicAdd: "cutlass::atomic_add", + FunctionalOp.AtomicMaximum: "cutlass::atomic_maximum", + FunctionalOp.Divides: "cutlass::divides", + FunctionalOp.Maximum: "cutlass::maximum", + FunctionalOp.Minimum: "cutlass::minimum", + FunctionalOp.Minus: "cutlass::minus", + FunctionalOp.Multiplies: "cutlass::multiplies", + FunctionalOp.MultiplyAdd: "cutlass::multiply_add", + FunctionalOp.Plus: "cutlass::plus", +} + + +class ActivationOp(enum.Enum): + DGelu = enum_auto() + Gelu = enum_auto() + GeluTaylor = enum_auto() + HardSwish = enum_auto() + Identity = enum_auto() + LeakyReLU = enum_auto() + ReLU = enum_auto() + Sigmoid = enum_auto() + SiLU = enum_auto() + Tanh = enum_auto() + + +ActivationOpTag = { + ActivationOp.DGelu: "cutlass::epilogue::thread::dGELU", + ActivationOp.Gelu: "cutlass::epilogue::thread::GELU", + ActivationOp.GeluTaylor: "cutlass::epilogue::thread::GELU_taylor", + ActivationOp.HardSwish: "cutlass::epilogue::thread::HardSwish", + ActivationOp.Identity: "cutlass::epilogue::thread::Identity", + ActivationOp.LeakyReLU: "cutlass::epilogue::thread::LeakyReLU", + ActivationOp.ReLU: "cutlass::epilogue::thread::ReLu", + ActivationOp.Sigmoid: "cutlass::epilogue::thread::Sigmoid", + ActivationOp.SiLU: "cutlass::epilogue::thread::SiLu", + ActivationOp.Tanh: "cutlass::epilogue::thread::Tanh", +} + + +def op_tag(op) -> str: + """ + Dispatches
`op` to the appropriate *Tag dictionary depending on whether + `op` is an ActivationOp or FunctionalOp. This is useful for cases in which + either type can be used. + + :param op: operation to emit a tag for + :type op: ActivationOp | FunctionalOp + + :return: tag corresponding to op + :rtype: str + """ + if isinstance(op, ActivationOp): + return ActivationOpTag[op] + elif isinstance(op, FunctionalOp): + return FunctionalOpTag[op] + else: + raise Exception(f"Unexpected op type {op}. Must be one of ActivationOp or FunctionalOp.") + + +class FloatRoundStyle(enum.Enum): + ToNearest = enum_auto() + ToNearestSatfinite = enum_auto() + Indeterminate = enum_auto() + TowardZero = enum_auto() + TowardInfinity = enum_auto() + TowardNegInfinity = enum_auto() + HalfUlpTruncDntz = enum_auto() + HalfUlpTruncate = enum_auto() + + +FloatRoundStyleTag = { + FloatRoundStyle.ToNearest: "cutlass::FloatRoundStyle::round_to_nearest", + FloatRoundStyle.ToNearestSatfinite: "cutlass::FloatRoundStyle::round_to_nearest_satfinite", + FloatRoundStyle.Indeterminate: "cutlass::FloatRoundStyle::round_indeterminate", + FloatRoundStyle.TowardZero: "cutlass::FloatRoundStyle::round_toward_zero", + FloatRoundStyle.TowardInfinity: "cutlass::FloatRoundStyle::round_toward_infinity", + FloatRoundStyle.TowardNegInfinity: "cutlass::FloatRoundStyle::round_toward_neg_infinity", + FloatRoundStyle.HalfUlpTruncDntz: "cutlass::FloatRoundStyle::round_half_ulp_trunc_dntz", + FloatRoundStyle.HalfUlpTruncate: "cutlass::FloatRoundStyle::round_half_ulp_truncate", +} + + +class MathInstruction: + """ + Description of a the lowest-level matrix-multiply-accumulate operation to be used in a kernel + """ + + def __init__( + self, + instruction_shape, + element_a, + element_b, + element_accumulator, + opcode_class=OpcodeClass.Simt, + math_operation=MathOperation.multiply_add, + ): + """ + :param instruction_shape: size of the [M, N, K] dimensions of the instruction + :type instruction_shape: list or tuple + :param element_a: data type of operand A + :param element_b: data type of operand B + :param element_accumulator: data type used in accumulation + :param opcode_class: higher-level class of the instruction (e.g., SIMT or Tensor Core) + :type opcode_class: cutlass_library.library.OpcodeClass + :param math_operation: the type of low-level operation to be performed (e.g., multiply accumulate) + :type math_operation: MathOperation + """ + self.instruction_shape = instruction_shape + self.element_a = element_a + self.element_b = element_b + self.element_accumulator = element_accumulator + self.opcode_class = opcode_class + self.math_operation = math_operation + + +class TileDescription: + """ + Description of a tile of computation to be performed in the kernel, encompassing threadblock, cluster, and warp shapes, + stage count, and math instruction specification + """ + + def __init__( + self, + threadblock_shape, + stages, + warp_count, + math_instruction, + cluster_shape=[1, 1, 1], + kernel_schedule: KernelScheduleType = None, + epilogue_schedule: EpilogueScheduleType = None, + tile_scheduler: TileSchedulerType = None + ): + """ + :param threadblock_shape: shape of a threadblock tyle + :type threadblock_shape: list or tuple + :param stages: number of pipline stages in the operation. 
For SM90 kernels, this can be set to `None` and the maximum + number of stages that can be supported for an operation on a given architecture will be computed at a later time + :type stages: int or None + :param warp_count: number of warps in each [M, N, K] dimension of a threadblock tile + :type warp_count: list, tuple, or None + :param math_instruction: specification of the instruction type and shape to be performed and the types of its operands + :type math_instruction: MathInstruction + :param cluster_shape: number of threadblocks in the [X, Y, Z] dimensions of a threadblock cluster + :param kernel_schedule: type of kernel schedule to use (only available for SM90+) + :type kernel_schedule: cutlass.KernelScheduleType + :param epilogue_schedule: type of epilogue schedule to use (only available for SM90+) + :type epilogue_schedule: cutlass.EpilogueScheduleType + :param tile_scheduler: type of tile scheduler to use (only available for SM90+) + :type tile_scheduler: cutlass.TileSchedulerType + """ + if ((kernel_schedule is None and epilogue_schedule is not None) or + (kernel_schedule is not None and epilogue_schedule is None)): + raise Exception("Kernel and epilogue schedule must either both be Auto or neither be Auto.") + + self.threadblock_shape = threadblock_shape + self.cluster_shape = cluster_shape + self.kernel_schedule = kernel_schedule + self.epilogue_schedule = epilogue_schedule + self.tile_scheduler = tile_scheduler + self.stages = stages + + self.math_instruction = math_instruction + self.instruction_shape = math_instruction.instruction_shape + + # Number of warps along x, y, z directions + self.warp_count = warp_count + + def clone_and_update(self, td: dict): + attrs = { + "cluster_shape": None, + "threadblock_shape": None, + "warp_count": None, + "stages": None, + "instruction_shape": None, + "kernel_schedule": None, + "epilogue_schedule": None, + "tile_scheduler": None + } + for key in attrs.keys(): + if key in td.keys(): + attrs[key] = td[key] + else: + attrs[key] = getattr(self, key) + + attrs["math_instruction"] = MathInstruction( + attrs["instruction_shape"], + self.math_instruction.element_a, + self.math_instruction.element_b, + self.math_instruction.element_accumulator, + self.math_instruction.opcode_class, + self.math_instruction.math_operation + ) + + # Remove the instruction shape + del attrs["instruction_shape"] + + return TileDescription(**attrs) + + @property + def num_threads(self): + """ + Returns the number of threads in the threadblock + + :return: number of threads in the threadblock + :rtype: int or None (if warp count is None) + """ + if self.warp_count is not None: + threads = 32 + for cnt in self.warp_count: + threads *= cnt + return threads + return None + + def procedural_name(self): + """ + Returns a name identifying the tile description + + :return: name identifying the tile description + :rtype: int + """ + emit_stages = 0 if self.stages is None else self.stages + name = "%dx%dx%d_%dx%d_%dx%d" % ( + self.cluster_shape[0], + self.cluster_shape[1], + self.cluster_shape[2], + self.threadblock_shape[0], + self.threadblock_shape[1], + self.threadblock_shape[2], + emit_stages + ) + + return name + + def procedural_name_2x(self): + """ + Returns a name identifying the tile description + + :return: name identifying the tile description + :rtype: int + """ + return "%dx%d_%dx%d" % (self.threadblock_shape[0], self.threadblock_shape[1], self.threadblock_shape[2], self.stages) + + def __str__(self): + """ + Returns a string with containing each of the tile 
description's values + + :return: contents of tile description + :rtype: str + """ + if self.kernel_schedule is not None: + kschedule = self.kernel_schedule + else: + kschedule = KernelScheduleType.ScheduleAuto + + if self.epilogue_schedule is not None: + eschedule = self.epilogue_schedule + else: + eschedule = EpilogueScheduleType.ScheduleAuto + + if self.tile_scheduler is not None: + tschedule = self.tile_scheduler.name + else: + tschedule = "None" + return f""" +{{ + ClusterShape: {self.cluster_shape} + ThreadblockShape: {self.threadblock_shape} + WarpCount: {self.warp_count} + Stages: {self.stages if self.stages is not None else 'Auto'} + InstructionShape: {self.math_instruction.instruction_shape} + Kernel schedule: {kschedule.name} + Epilogue schedule: {eschedule.name} + TileScheduler: {tschedule} +}}""" + + +class TensorDescription: + def __init__(self, element, layout, alignment=1, complex_transform=ComplexTransform.none): + self.element = element + self.layout = layout + self.alignment = min(128 // DataTypeSize[self.element], alignment) + self.complex_transform = complex_transform + + +def CalculateSmemUsagePerStage(operation): + """ + Returns the amount of shared memory in bytes consumed in a single stage of a kernel. + + :param operation: operation for which the shared memory usage is to be computed. If stages are + set via the `operation.tile_description.stages` parameter, that setting is ignored + in the present calculation + :type operation: cutlass.backend.Operation + + :return: number of bytes of shared memory consumed by a single stage + :rtype: int + """ + m, n, k = operation.tile_description.threadblock_shape + + if operation.operation_kind == OperationKind.Gemm: + stage_barrier_bytes = 32 + return ( + (DataTypeSize[operation.A.element] * m * k // 8) + + (DataTypeSize[operation.B.element] * k * n // 8) + + stage_barrier_bytes + ) + else: + raise Exception("Unsupported operation kind {}.".format(operation.operation_kind)) + + +def CalculateSmemUsage(operation): + """ + Returns the amount of shared memory in bytes consumed by a kernel across all of its stages. + + :param operation: operation for which the shared memory usage is to be computed + :type operation: cutlass.backend.Operation + + :return: number of bytes of shared memory consumed by the kernel + :rtype: int + """ + return operation.tile_description.stages * CalculateSmemUsagePerStage(operation) + + +class ApiVersion(enum.Enum): + """ + Differentiate between CUTLASS 2.x and 3.x API versions + """ + + v2x = enum_auto() + v3x = enum_auto() + + +def api_version(arch, opclass, dtype): + """ + Returns whether the architecture, opcode class, and datatype in question require using CUTLASS 2.x + or 3.x for code emission.
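+
+    For example (illustrative only, following the rule implemented below):
+
+        api_version(90, OpcodeClass.TensorOp, DataType.f16)  # -> ApiVersion.v3x
+        api_version(90, OpcodeClass.TensorOp, DataType.f64)  # -> ApiVersion.v2x
+        api_version(80, OpcodeClass.TensorOp, DataType.f16)  # -> ApiVersion.v2x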
+ + :param arch: compute capability of device on which to run + :type arch: int + :param opclass: class of the operation being performed + :type opclass: cutlass.OpcodeClass + :param dtype: data type to be used in operation (assumes that ElementA and ElementB are the same) + :type dtype: cutlass.DataType + + :return: API version to be used in code emission + :rtype: ApiVersion + """ + if (arch >= 90 and + opclass == OpcodeClass.TensorOp and + (dtype != DataType.f64)): + return ApiVersion.v3x + else: + return ApiVersion.v2x + + +class EmissionType(enum.Enum): + """ + Tags for whether to emit a kernel- or device-level operation + """ + + Kernel = enum_auto() + Device = enum_auto() diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/memory_manager.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/memory_manager.py new file mode 100644 index 0000000000000000000000000000000000000000..7c759e64cc6c2c42ab4331fcc78c5b48273a5127 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/memory_manager.py @@ -0,0 +1,74 @@ +################################################################################################# +# +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +################################################################################################# + +import numpy as np +import rmm + + +class PoolMemoryManager: + def __init__(self, init_pool_size: int, max_pool_size: int) -> None: + self.pool = rmm.mr.PoolMemoryResource( + rmm.mr.CudaMemoryResource(), + initial_pool_size=init_pool_size, + maximum_pool_size=max_pool_size + ) + self.mr = rmm.mr.TrackingResourceAdaptor(self.pool) + rmm.mr.set_current_device_resource(self.mr) + + def get_allocated_size(self): + return self.mr.get_allocated_bytes() + + def pool_size(self): + return self.pool.pool_size() + + +def todevice(host_data, dtype=np.float32): + """ + Pass the host_data to device memory + """ + if isinstance(host_data, list): + return rmm.DeviceBuffer.to_device(np.array(host_data, dtype=dtype).tobytes()) + elif isinstance(host_data, np.ndarray): + return rmm.DeviceBuffer.to_device(host_data.tobytes()) + + +def device_mem_alloc(size): + return rmm.DeviceBuffer(size=size) + + +def align_size(size, alignment=256): + return ((size + alignment - 1) // alignment) * alignment + + +def get_allocated_size(): + device_resource = rmm.mr.get_current_device_resource() + return device_resource.get_allocated_bytes() diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/operation.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/operation.py new file mode 100644 index 0000000000000000000000000000000000000000..8a4d57d6499cb016754a15a1f966610d52752560 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/operation.py @@ -0,0 +1,127 @@ +################################################################################ +# +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +################################################################################ + +import ctypes + +from cuda import __version__, cuda + +from cutlass.backend.utils.device import device_cc + +_version_splits = [int(x) for x in __version__.split("rc")[0].split(".")] +supports_cluster_launch = device_cc() >= 90 and ( + _version_splits[0] > 11 or (_version_splits[0] == 11 and _version_splits[1] >= 8) +) + + +class LaunchConfiguration: + def __init__(self, grid=[1, 1, 1], block=[1, 1, 1], smem=0): + self.grid = grid + self.block = block + self.shared_memory_capacity = smem + + +class ExecutableOperation: + def __init__(self, operation): + self.operation = operation + self.module = None + self.kernel = None + + def name(self): + return self.operation.procedural_name() + + def emit(self): + return "" + + def can_implement(self, configuration, arguments): + raise NotImplementedError() + + def get_host_workspace_size(self, arguments): + raise NotImplementedError() + + def get_device_workspace_size(self, arguments): + raise NotImplementedError() + + def plan(self, arguments): + raise NotImplementedError() + + def initialize(self, host_workspace, device_workspace, launch_config, arguments, stream=cuda.CUstream(0)): + raise NotImplementedError() + + def run_with_clusters(self, launch_config, kernel_params, stream=cuda.CUstream(0)): + if hasattr(self.operation, "tile_description") and hasattr(self.operation.tile_description, "cluster_shape"): + attr = cuda.CUlaunchAttribute() + attr.value.clusterDim.x, attr.value.clusterDim.y, attr.value.clusterDim.z = self.operation.tile_description.cluster_shape + attr.id = cuda.CUstreamAttrID.CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION + attrs = [attr] + + # Allow for non-portable cluster sizes + err, = cuda.cuFuncSetAttribute( + self.kernel, cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_NON_PORTABLE_CLUSTER_SIZE_ALLOWED, 1) + if err != cuda.CUresult.CUDA_SUCCESS: + return err + else: + attrs = [] + + config = cuda.CUlaunchConfig() + config.gridDimX, config.gridDimY, config.gridDimZ = launch_config.grid + config.blockDimX, config.blockDimY, config.blockDimZ = launch_config.block + config.blockDimZ = launch_config.block[2] + config.sharedMemBytes = launch_config.shared_memory_capacity + config.hStream = stream + config.attrs = attrs + config.numAttrs = len(attrs) + + err, = cuda.cuLaunchKernelEx( + config, f=self.kernel, kernelParams=kernel_params, extra=0) + return err + + def run_without_clusters(self, launch_config, kernel_params, stream=cuda.CUstream(0)): + err, = cuda.cuLaunchKernel( + self.kernel, + launch_config.grid[0], launch_config.grid[1], launch_config.grid[2], + launch_config.block[0], launch_config.block[1], launch_config.block[2], + launch_config.shared_memory_capacity, + stream, + kernel_params, + 0) + + return err + + def run(self, host_workspace, device_workspace, launch_config, stream=cuda.CUstream(0)): + cArg = (ctypes.c_char * len(host_workspace)).from_buffer(host_workspace) + packed = (ctypes.c_void_p * 1)() + packed[0] = ctypes.addressof(cArg) + + if supports_cluster_launch: + return self.run_with_clusters(launch_config, packed, stream) + else: + return self.run_without_clusters(launch_config, packed, stream) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/reduction_operation.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/reduction_operation.py new file mode 100644 index 0000000000000000000000000000000000000000..9662017cc9758e1a1371d5aa370b383a87a2499f --- 
/dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/reduction_operation.py @@ -0,0 +1,435 @@ +################################################################################ +# +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +################################################################################ + +import ctypes +from typing import Union + +from cuda import cuda, cudart +import numpy as np + +from cutlass import ( + DataTypeNames, + DataTypeSize, + DataTypeTag, + LayoutType +) +from cutlass.backend.c_types import MatrixCoord_, TensorRef2D_, get_reduction_params +from cutlass.backend.frontend import NumpyFrontend, TorchFrontend +from cutlass.backend.library import TensorDescription +from cutlass.backend.operation import ExecutableOperation, LaunchConfiguration +from cutlass.backend.utils.software import CheckPackages, SubstituteTemplate +from cutlass.shape import MatrixCoord + +if CheckPackages().check_torch(): + import torch + + +class ReductionOperation: + pass + + +class ReductionArguments: + """ + Arguments of reduction + """ + + def __init__( + self, + operation: ReductionOperation, + problem_size: "list[int]", + partitions: int, + workspace: cuda.CUdeviceptr, + destination: "Union[cuda.CUdeviceptr, np.ndarray, torch.Tensor]", + source: "Union[cuda.CUdeviceptr, np.ndarray, torch.Tensor]", + **kwargs, + ) -> None: + # tensor_C can be interpreted as the bias with bias=True in keyword args + if "bias" in kwargs.keys(): + self.bias = kwargs["bias"] + else: + # by default, tensor_C is not bias + self.bias = False + + self.operation = operation + self.ptr_workspace = workspace + + # number of split-k partitions + self.partitions = partitions + + if isinstance(destination, np.ndarray): + self.host_D = destination + self.destination_buffer = NumpyFrontend.argument(destination, True) + self.source_buffer = NumpyFrontend.argument(source, False) + self.ptr_destination = cuda.CUdeviceptr(self.destination_buffer.ptr) + self.ptr_source = cuda.CUdeviceptr(self.source_buffer.ptr) + elif CheckPackages().check_torch() and isinstance(destination, torch.Tensor): + self.ptr_destination = TorchFrontend.argument(destination) + self.ptr_source = TorchFrontend.argument(source) + elif isinstance(destination, cuda.CUdeviceptr): + self.ptr_destination = destination + self.ptr_source = source + else: + raise TypeError("unknown Type") + + self.problem_size = MatrixCoord_(problem_size[0], problem_size[1]) + + self.partition_stride = ( + problem_size[0] * problem_size[1] * DataTypeSize[operation.C.element] // 8 + ) + + if "output_op" in kwargs.keys(): + self.output_op = kwargs["output_op"] + else: + self.output_op = self.operation.epilogue_type(1.0, 0.0) + + self.get_arguments() + + @staticmethod + def get_tensor_ref( + extent: "tuple[int]", + device_ptr: cuda.CUdeviceptr, + layout: LayoutType, + ): + if layout == LayoutType.RowMajor: + return TensorRef2D_(int(device_ptr), extent[1]) + else: + raise ValueError(f"Unknown layout type {layout}") + + def get_arguments(self): + ref_workspace = ReductionArguments.get_tensor_ref( + extent=[ + self.problem_size.row, + self.problem_size.column, + ], + device_ptr=self.ptr_workspace, + layout=LayoutType.RowMajor, + ) + if self.bias: + ref_source = ReductionArguments.get_tensor_ref( + extent=[0, 0], + device_ptr=self.ptr_source, + layout=LayoutType.RowMajor, + ) + else: + ref_source = ReductionArguments.get_tensor_ref( + extent=[ + self.problem_size.row, + self.problem_size.column, + ], + device_ptr=self.ptr_source, + layout=LayoutType.RowMajor, + ) + + ref_destination = ReductionArguments.get_tensor_ref( + extent=[ + self.problem_size.row, + self.problem_size.column, + ], + device_ptr=self.ptr_destination, + layout=LayoutType.RowMajor, + ) + + self.c_arguments = 
self.operation.argument_type( + self.problem_size, + self.partitions, + self.partition_stride, + ref_workspace, + ref_destination, + ref_source, + self.output_op, + ) + + params_ = self.operation.rt_module.get_args(ctypes.byref(self.c_arguments)) + self.host_workspace = bytearray(params_.contents) + + def sync(self): + (err,) = cudart.cudaDeviceSynchronize() + if err != cuda.CUresult.CUDA_SUCCESS: + raise RuntimeError(f"CUDA Error {str(err)}") + + if hasattr(self, "host_D"): + (err,) = cuda.cuMemcpyDtoH( + self.host_D, + self.ptr_destination, + self.host_D.size * self.host_D.itemsize, + ) + if err != cuda.CUresult.CUDA_SUCCESS: + raise RuntimeError("CUDA Error %s" % str(err)) + + def free(self): + if hasattr(self, "destination_buffer"): + del self.destination_buffer + if hasattr(self, "source_buffer"): + del self.source_buffer + + +class ReductionRT(ExecutableOperation): + """ + ReductionRT manages the CUTLASS runtime components for reduction + """ + + KernelTemplate = r""" +extern "C" +__global__ void +${operation_name}(${operation_name}${operation_suffix}::Params params) { + + // Dynamic shared memory base pointer + extern __shared__ int SharedStorageBase[]; + + // Declare pointer to dynamic shared memory. + ${operation_name}${operation_suffix}::SharedStorage *shared_storage = + reinterpret_cast<${operation_name}${operation_suffix}::SharedStorage *>(SharedStorageBase); + + ${operation_name}${operation_suffix} op; + + op(params, *shared_storage); +} + """ + HostTemplate = r""" +extern "C" { + // Get the size of params in bytes + int ${operation_name}_get_param_size(){ + return sizeof(${operation_name}${operation_suffix}::Params); + } + + // Get the size of dynamic shared memory in bytes + int ${operation_name}_shared_memory_size() { + return int(sizeof(${operation_name}${operation_suffix}::SharedStorage)); + } + + // Get the params as byte array + char* ${operation_name}_get_params(${operation_name}${operation_suffix}::Params* params){ + char *bytes = ((char*)(params)); + char *output = new char[sizeof(${operation_name}${operation_suffix}::Params)]; + for (unsigned int i = 0; i < sizeof(${operation_name}${operation_suffix}::Params); i ++) + output[i] = bytes[i]; + + return output; + } +} + """ + + def __init__(self, operation: ReductionOperation): + super().__init__(operation) + + self.operation: ReductionOperation = operation + self.emitter = EmitReductionInstance("_type") + + self.elements_per_access = self.operation.count + ( + self.argument_type, + self.epilogue_type, + ) = get_reduction_params(operation.epilogue_functor) + self.argtype = [ctypes.POINTER(self.argument_type)] + + def emit(self): + return self.emitter.emit(self.operation) + + def plan(self, arguments: ReductionArguments): + block_shape = [ + self.operation.shape.column // self.elements_per_access, + self.operation.shape.row, + 1, + ] + grid_shape = [ + (arguments.problem_size.row + self.operation.shape.row - 1) + // self.operation.shape.row, + (arguments.problem_size.column + self.operation.shape.column - 1) + // self.operation.shape.column, + 1, + ] + return LaunchConfiguration( + grid_shape, + block_shape, + self.shared_memory_capacity, + ) + + def initialize(self): + (err,) = cuda.cuFuncSetAttribute( + self.kernel, + attrib=cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, + value=self.shared_memory_capacity, + ) + if err != cuda.CUresult.CUDA_SUCCESS: + raise RuntimeError(f"CUDA Error: {err}") + + +class ReductionOperation: + """ + CUTLASS reduction Operation + """ + + def __init__( + self, 
+ shape: MatrixCoord, + C: TensorDescription, + element_accumulator, + element_workspace=None, + element_compute=None, + epilogue_functor=None, + count: int = 1, + partitions_per_stage: int = 4, + ) -> None: + self.shape = shape + self.epilogue_functor = epilogue_functor + self.element_accumulator = element_accumulator + + if element_workspace is None: + self.element_workspace = element_accumulator + else: + self.element_workspace = element_workspace + + if element_compute is None: + self.element_compute = element_accumulator + else: + self.element_compute = element_compute + + self.element_output = C.element + self.C: TensorDescription = C + + # Reduce op processing size + self.count: int = count + + # Number of partitions to reduce per stage + self.partitions_per_stage: int = partitions_per_stage + + self.rt_module: ReductionRT = ReductionRT(self) + self.argument_type = self.rt_module.argument_type + self.epilogue_type = self.rt_module.epilogue_type + + def extended_name(self): + extend_name = "${element_workspace}_${element_accumulator}_${element_compute}_${element_output}" + + return SubstituteTemplate( + extend_name, + { + "element_workspace": DataTypeNames[self.element_workspace], + "element_accumulator": DataTypeNames[self.element_accumulator], + "element_compute": DataTypeNames[self.element_compute], + "element_output": DataTypeNames[self.element_output], + }, + ) + + def configuration_name(self): + """The full procedural name indicates architecture, extended name, tile size""" + + configuration_name = "cutlass_reduce_split_k_${extended_name}_${threadblock}" + + threadblock = "%dx%d" % ( + self.shape.row, + self.shape.column, + ) + + return SubstituteTemplate( + configuration_name, + { + "extended_name": self.extended_name(), + "threadblock": threadblock, + }, + ) + + def procedural_name(self): + """The full procedural name indicates architeture, extended name, tile size""" + return self.configuration_name() + + def run(self, arguments: ReductionArguments) -> cuda.CUresult: + """ + Configure and launch the cuda kernel with input arguments + """ + launch_config = self.rt_module.plan(arguments) + + host_workspace = arguments.host_workspace + device_workspace = None + + err = self.rt_module.run( + host_workspace, + device_workspace, + launch_config, + ) + + if err != cuda.CUresult.CUDA_SUCCESS: + raise RuntimeError(f"CUDA Error {str(err)}") + + return err + + +class EmitReductionInstance: + def __init__(self, operation_suffix="") -> None: + self.operation_suffix = operation_suffix + self.includes = [ + "cutlass/cutlass.h", + "cutlass/numeric_types.h", + "cutlass/arch/arch.h", + "cutlass/arch/mma.h", + "cutlass/layout/matrix.h", + "cutlass/gemm/device/gemm.h", + "cutlass/gemm/device/gemm_universal_adapter.h", + "cutlass/gemm/kernel/default_gemm_universal.h", + "cutlass/reduction/kernel/reduce_split_k.h", + "cutlass/reduction/thread/reduction_operators.h", + ] + self.template = """ +// Reduction kernel instance +using ${operation_name}_base = +typename cutlass::reduction::kernel::ReduceSplitK< + cutlass::MatrixShape<${shape_row}, ${shape_column}>, + ${epilogue_functor}, + cutlass::reduction::thread::ReduceAdd< + ${element_accumulator}, + ${element_output}, + ${count}>, + ${partition_per_stage}>; + +struct ${operation_name}${operation_suffix}: + public ${operation_name}_base { }; + """ + + def emit(self, operation: ReductionOperation): + vector_length_bits = min(operation.C.alignment * DataTypeSize[operation.C.element], 128) + epilogue_vector_length = vector_length_bits // 
DataTypeSize[operation.C.element] + + values = { + "operation_name": operation.configuration_name(), + "operation_suffix": self.operation_suffix, + "shape_row": str(operation.shape.row), + "shape_column": str(operation.shape.column), + "epilogue_functor": operation.epilogue_functor.emit(), + "element_output": DataTypeTag[operation.element_output], + "epilogue_vector_length": str(epilogue_vector_length), + "element_accumulator": DataTypeTag[operation.element_accumulator], + "element_compute": DataTypeTag[operation.element_compute], + "element_workspace": DataTypeTag[operation.element_workspace], + "count": str(operation.count), + "partition_per_stage": str(operation.partitions_per_stage), + } + + return SubstituteTemplate(self.template, values) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/type_hint.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/type_hint.py new file mode 100644 index 0000000000000000000000000000000000000000..d1e8ba9102dffbd10f7ee094de0d8e19ee2aa5e1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/type_hint.py @@ -0,0 +1,35 @@ +################################################################################ +# +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +################################################################################ + +GemmOperation = "Union[GemmOperationUniversal, GemmOperationGrouped]" + +Tensor = "Union[cuda.CUdeviceptr, np.ndarray, torch.Tensor, cp.ndarray]" diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/utils/__init__.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..be36ad83371822be075d9eeafbccd9e1e078ac40 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/utils/__init__.py @@ -0,0 +1,40 @@ +################################################################################ +# +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +################################################################################ + +from cutlass.backend.utils.datatypes import * +from cutlass.backend.utils.device import check_cuda_errors, device_cc +from cutlass.backend.utils.software import ( + CheckPackages, + SubstituteTemplate, + device_sm_count, + get_memory_pool, +) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/utils/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bce0fb35da7868b50dbe2a25b58c9a4723d535b6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/utils/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/utils/__pycache__/datatypes.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/utils/__pycache__/datatypes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9709c286c51eff2cfb4fcffe407d965d9a2644fe Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/utils/__pycache__/datatypes.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/utils/__pycache__/device.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/utils/__pycache__/device.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..821de33c2edb7d03e24eb7e481b0a4ce0ebb5c07 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/utils/__pycache__/device.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/utils/__pycache__/software.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/utils/__pycache__/software.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d7e783661ce0a77661c5d6ae3463952d2907b028 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/utils/__pycache__/software.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/utils/datatypes.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/utils/datatypes.py new file mode 100644 index 0000000000000000000000000000000000000000..1140cb84ba81851beb0ea249f7368831e118acb1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/utils/datatypes.py @@ -0,0 +1,156 @@ +################################################################################################# +# +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. 
Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################################# + +""" +Utility functions for converting between frontend datatypes and CUTLASS datatypes +""" + +from cuda import cuda + +from cutlass import DataType +from cutlass.backend.utils.software import CheckPackages + +numpy_available = CheckPackages().check_numpy() +if numpy_available: + import numpy as np + + numpy_to_cutlass_dict = { + np.float16: DataType.f16, + np.float32: DataType.f32, + np.float64: DataType.f64, + np.int8: DataType.s8, + np.int32: DataType.s32, + np.dtype('float16'): DataType.f16, + np.dtype('float32'): DataType.f32, + np.dtype('float64'): DataType.f64, + np.dtype('int8'): DataType.s8, + np.dtype('int32'): DataType.s32, + } + + +def numpy_to_cutlass(inp): + numpy_available = CheckPackages().check_numpy() + if numpy_available: + return numpy_to_cutlass_dict.get(inp, None) + + +cupy_available = CheckPackages().check_cupy() +if cupy_available: + import cupy as cp + + cupy_to_cutlass_dict = { + cp.float16: DataType.f16, + cp.float32: DataType.f32, + cp.float64: DataType.f64, + } + + +def cupy_to_cutlass(inp): + cupy_available = CheckPackages().check_cupy() + if cupy_available: + return cupy_to_cutlass_dict.get(inp, None) + + +torch_available = CheckPackages().check_torch() +if torch_available: + import torch + + torch_to_cutlass_dict = { + torch.half: DataType.f16, + torch.float16: DataType.f16, + torch.float: DataType.f32, + torch.float32: DataType.f32, + torch.double: DataType.f64, + torch.float64: DataType.f64, + } + + +def torch_to_cutlass(inp): + if torch_available: + return torch_to_cutlass_dict.get(inp, None) + + +try: + import bfloat16 + + bfloat16_available = True + numpy_to_cutlass_dict[np.dtype(bfloat16.bfloat16)] = DataType.bf16 +except ImportError: + bfloat16_available = False + + +def bfloat16_to_cutlass(inp): + if bfloat16_available: + if inp == bfloat16.bfloat16: + return DataType.bf16 + + +def to_cutlass(inp): + for cvt_fn in [ + bfloat16_to_cutlass, + cupy_to_cutlass, + numpy_to_cutlass, + torch_to_cutlass, + ]: + out = cvt_fn(inp) + if out is not None: + return out + + raise Exception( + "No available conversion from type {} to a CUTLASS type.".format(inp) + ) + + +def to_device_ptr(tensor) -> cuda.CUdeviceptr: + """ + Converts a tensor to a CUdeviceptr + + :param tensor: tensor 
to convert + :type tensor: np.ndarray | torch.Tensor | cp.ndarray | int + + :return: device pointer + :rtype: cuda.CUdeviceptr + """ + if isinstance(tensor, np.ndarray): + ptr = cuda.CUdeviceptr(tensor.__array_interface__["data"][0]) + elif torch_available and isinstance(tensor, torch.Tensor): + ptr = cuda.CUdeviceptr(tensor.data_ptr()) + elif cupy_available and isinstance(tensor, cp.ndarray): + ptr = cuda.CUdeviceptr(int(tensor.data.ptr)) + elif isinstance(tensor, cuda.CUdeviceptr): + ptr = tensor + elif isinstance(tensor, int): + ptr = cuda.CUdeviceptr(tensor) + else: + raise NotImplementedError(tensor) + + return ptr diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/utils/device.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/utils/device.py new file mode 100644 index 0000000000000000000000000000000000000000..15e5457f558f03d72af46412008e830a1617cc17 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/utils/device.py @@ -0,0 +1,76 @@ +################################################################################################# +# +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################################# + +""" +Utility functions for interacting with the device +""" + +from cuda import cudart + + +def check_cuda_errors(result: list): + """ + Checks whether `result` contains a CUDA error raises the error as an exception, if so. Otherwise, + returns the result contained in the remaining fields of `result`. + + :param result: the results of the `cudart` method, consisting of an error code and any method results + :type result: list + + :return: non-error-code results from the `results` parameter + """ + # `result` is of the format : (cudaError_t, result...) 
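+    # For example, cudart.cudaGetDeviceProperties(0) returns (cudaError_t, cudaDeviceProp); on success this helper returns just the cudaDeviceProp struct, which is how device_cc() below consumes it.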
+ err = result[0] + if err.value: + raise RuntimeError("CUDA error: {}".format(cudart.cudaGetErrorName(err))) + + if len(result) == 1: + return None + elif len(result) == 2: + return result[1] + else: + return result[1:] + + +def device_cc(device: int = 0) -> int: + """ + Returns the compute capability of the device with ID `device`. + + :param device: ID of the device to query + :type device: int + + :return: compute capability of the queried device (e.g., 80 for SM80) + :rtype: int + """ + deviceProp = check_cuda_errors(cudart.cudaGetDeviceProperties(device)) + major = str(deviceProp.major) + minor = str(deviceProp.minor) + return int(major + minor) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/utils/software.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/utils/software.py new file mode 100644 index 0000000000000000000000000000000000000000..9f099b8a2990a628eca803bcee3e5a098c10d41f --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/utils/software.py @@ -0,0 +1,111 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +################################################################################################# + +import re +import sys + +from cutlass.backend.memory_manager import PoolMemoryManager + + +class CheckPackages: + def __init__(self) -> None: + pass + + def check_cupy(self): + if "cupy" in sys.modules: + return True + else: + try: + import cupy + + return True + except ImportError: + print("cupy is not loaded.") + + def check_numpy(self): + if "numpy" in sys.modules: + return True + else: + try: + import numpy + + return True + except ImportError: + print("numpy is not loaded.") + + def check_torch(self): + if "torch" in sys.modules: + return True + else: + try: + import torch + + return True + except ImportError: + print("torch is not loaded.") + + +def SubstituteTemplate(template, values): + text = template + changed = True + while changed: + changed = False + for key, value in values.items(): + regex = "\\$\\{%s\\}" % key + newtext = re.sub(regex, value, text) + if newtext != text: + changed = True + text = newtext + return text + + +def device_sm_count(): + from cuda import cuda + + _device = 0 + err, _device_sm_count = cuda.cuDeviceGetAttribute( + cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, _device + ) + if err != cuda.CUresult.CUDA_SUCCESS: + raise Exception( + "Failed to retrieve SM count. " + f"cuDeviceGetAttribute() failed with error: {cuda.cuGetErrorString(err)[1]}" + ) + + return _device_sm_count + + +def get_memory_pool(init_pool_size=0, max_pool_size=2 ** 34): + memory_pool = PoolMemoryManager( + init_pool_size=init_pool_size, max_pool_size=max_pool_size + ) + return memory_pool diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/backend/utils/software.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/emit/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..52200ca7934909c35b894462e5df915f169b09ea --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/emit/__init__.py @@ -0,0 +1,33 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################################# + +from cutlass.emit.pytorch import pytorch diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/emit/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/emit/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0dc1ae4c7f5fc70092293351081cb01abf3de5c3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/emit/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/emit/__pycache__/common.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/emit/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cd3ea4d9823c5770be4ac7d2eb9f4141cb378ba6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/emit/__pycache__/common.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/emit/__pycache__/pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/emit/__pycache__/pytorch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6964148e999da837416b0a27b8405b9ec7f81b4e Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/emit/__pycache__/pytorch.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/emit/common.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/emit/common.py new file mode 100644 index 0000000000000000000000000000000000000000..c52818ca30a38e77baf1f30366095e889939512f --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/emit/common.py @@ -0,0 +1,265 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. 
Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################################# + +""" +Common utilities for emitting CUTLASS kernels +""" + +import cutlass + +# Strings used for printing information about the generation of emitted scripts +_AUTOGEN_STR = f"This file was automatically generated by the CUTLASS {cutlass.__version__} Python interface (https://github.com/nvidia/cutlass/python)" + + +_CSTYLE_AUTOGEN_COMMENT = f"""// {_AUTOGEN_STR} +""" + + +_PYSTYLE_AUTOGEN_COMMENT = f"""# {_AUTOGEN_STR} +""" + +_CUTLASS_KERNEL_ARGS_2x = """ + typename DeviceKernel::Arguments arguments { + cutlass::gemm::GemmUniversalMode::kGemm, + {M, N, K}, // problem size + 1, + {alpha, beta}, + A, B, C, D, + 0, 0, 0, 0, // batch strides + DeviceKernel::LayoutA::packed({M, K}).stride(0), // lda + DeviceKernel::LayoutB::packed({K, N}).stride(0), // ldb + DeviceKernel::LayoutC::packed({M, N}).stride(0), // ldc + DeviceKernel::LayoutC::packed({M, N}).stride(0) // ldd + }; +""" + +_CUTLASS_KERNEL_ARGS_2x_STREAM_K = """ + typename DeviceKernel::Arguments arguments { + cutlass::gemm::GemmUniversalMode::kGemm, + {M, N, K}, // problem size + 1, + {alpha, beta}, + A, B, C, D, + 0, 0, 0, 0, // batch strides + DeviceKernel::LayoutA::packed({M, K}).stride(0), // lda + DeviceKernel::LayoutB::packed({K, N}).stride(0), // ldb + DeviceKernel::LayoutC::packed({M, N}).stride(0), // ldc + DeviceKernel::LayoutC::packed({M, N}).stride(0), // ldd + -1 // avail_sms + }; +""" + +_CUTLASS_KERNEL_RUN_GEMM_2x = """ +using ElementCompute = typename DeviceKernel::EpilogueOutputOp::ElementCompute; + +cutlass::Status ${name}_kernel_run(int M, int N, int K, + const DeviceKernel::ElementA* A, const DeviceKernel::ElementB* B, const DeviceKernel::ElementC* C, DeviceKernel::ElementC* D, + ElementCompute alpha, ElementCompute beta) { + ${args} + size_t workspace_size = DeviceKernel::get_workspace_size(arguments); + cutlass::device_memory::allocation workspace(workspace_size); + + DeviceKernel gemm_op; + cutlass::Status status = gemm_op.initialize(arguments, + workspace.get(), + nullptr); // CUDA stream + + if (status != cutlass::Status::kSuccess) { + return status; + } + + status = gemm_op(); + return status; +} +""" + +_CUTLASS_KERNEL_RUN_GEMM_3x = """ +using StrideA = typename DeviceKernel::GemmKernel::StrideA; +using StrideB = typename DeviceKernel::GemmKernel::StrideB; +using StrideC = typename DeviceKernel::GemmKernel::StrideC; +using StrideD = typename DeviceKernel::GemmKernel::StrideD; + +using ElementCompute = typename 
DeviceKernel::EpilogueOutputOp::ElementCompute; + +cutlass::Status ${name}_kernel_run( + int M, int N, int K, int L, + const DeviceKernel::ElementA* A, const DeviceKernel::ElementB* B, const DeviceKernel::ElementC* C, DeviceKernel::ElementC* D, + ElementCompute alpha, ElementCompute beta, const cutlass::KernelHardwareInfo& hw_info) { + + typename DeviceKernel::Arguments arguments{ + cutlass::gemm::GemmUniversalMode::kGemm, + {M, N, K, L}, // problem size + A, // ptrA + cutlass::make_cute_packed_stride(StrideA{}, cute::make_shape(M, K, L)), // stride A + B, // ptrB + cutlass::make_cute_packed_stride(StrideB{}, cute::make_shape(N, K, L)), // stride B + { + C, // ptrC + cutlass::make_cute_packed_stride(StrideC{}, cute::make_shape(M, N, L)), // stride C + D, // ptrD + cutlass::make_cute_packed_stride(StrideD{}, cute::make_shape(M, N, L)), // stride D + {alpha, beta}, + }, + hw_info + }; + + size_t workspace_size = DeviceKernel::get_workspace_size(arguments); + cutlass::device_memory::allocation workspace(workspace_size); + + DeviceKernel gemm_op; + cutlass::Status status = gemm_op.run(arguments, + workspace.get(), + nullptr); // CUDA stream + + return status; +} +""" + + +_CUTLASS_KERNEL_RUN_GROUPED_GEMM_2x = """ +using ElementCompute = typename DeviceKernel::EpilogueOutputOp::ElementCompute; + +int threadblock_count = DeviceKernel::sufficient(); + +cutlass::Status ${name}_kernel_run(int problem_count, cutlass::gemm::GemmCoord* problem_sizes, + DeviceKernel::ElementA** A, DeviceKernel::ElementB** B, DeviceKernel::ElementC** C, DeviceKernel::ElementC** D, + int64_t* lda, int64_t* ldb, int64_t* ldc, int64_t* ldd, + ElementCompute alpha, ElementCompute beta) { + + typename DeviceKernel::Arguments arguments { + problem_sizes, + problem_count, + threadblock_count, + {alpha, beta}, + A, B, C, D, + lda, ldb, ldc, ldd + }; + + size_t workspace_size = DeviceKernel::get_workspace_size(arguments); + cutlass::device_memory::allocation workspace(workspace_size); + + DeviceKernel gemm_op; + cutlass::Status status = gemm_op.initialize(arguments, + workspace.get(), + nullptr); // CUDA stream + + if (status != cutlass::Status::kSuccess) { + return status; + } + + status = gemm_op(); + return status; +} +""" + + +_CUTLASS_KERNEL_RUN_CONV2D_2x = """ + +using UnderlyingKernel = typename DeviceKernel::UnderlyingKernel; +namespace { +using TensorRefA = typename UnderlyingKernel::TensorRefA; +using TensorRefB = typename UnderlyingKernel::TensorRefB; +using TensorRefC = typename UnderlyingKernel::TensorRefC; +using ElementCompute = typename UnderlyingKernel::EpilogueOutputOp::ElementCompute; +} + +template +TensorRef get_tensor_ref(cutlass::Tensor4DCoord tensor_coord, Element* ptr){ + cutlass::layout::TensorNHWC layout = cutlass::layout::TensorNHWC::packed(tensor_coord); + TensorRef tensor_ref(ptr, layout); + return tensor_ref; +} + +cutlass::Status ${name}_kernel_run(cutlass::conv::Conv2dProblemSize* problem_size, + UnderlyingKernel::ElementA* A, UnderlyingKernel::ElementB* B, + UnderlyingKernel::ElementC* C, UnderlyingKernel::ElementC* D, + ElementCompute alpha, ElementCompute beta, std::string split_k_mode, + cudaStream_t stream, int device_id=0) { + // create the tensor references + cutlass::Tensor4DCoord tensor_coord_A = cutlass::conv::implicit_gemm_tensor_a_extent( + cutlass::conv::Operator::k${conv_kind_name}, *problem_size + ); + cutlass::Tensor4DCoord tensor_coord_B = cutlass::conv::implicit_gemm_tensor_b_extent( + cutlass::conv::Operator::k${conv_kind_name}, *problem_size + ); + cutlass::Tensor4DCoord 
tensor_coord_C = cutlass::conv::implicit_gemm_tensor_c_extent( + cutlass::conv::Operator::k${conv_kind_name}, *problem_size + ); + + TensorRefA tensor_ref_A = get_tensor_ref(tensor_coord_A, A); + TensorRefB tensor_ref_B = get_tensor_ref(tensor_coord_B, B); + TensorRefC tensor_ref_C = get_tensor_ref(tensor_coord_C, C); + TensorRefC tensor_ref_D = get_tensor_ref(tensor_coord_C, D); + + cutlass::conv::SplitKMode mode; + if (split_k_mode == "serial") { + mode = cutlass::conv::SplitKMode::kSerial; + } else if (split_k_mode == "parallel") { + mode = cutlass::conv::SplitKMode::kParallel; + } else { + throw std::runtime_error("Invalid split_k_mode: " + split_k_mode); + } + + typename DeviceKernel::Arguments arguments{ + *problem_size, + tensor_ref_A, + tensor_ref_B, + tensor_ref_C, + tensor_ref_D, + {alpha, beta}, + mode + }; + + DeviceKernel implicit_gemm_op; + + size_t workspace_size = implicit_gemm_op.get_workspace_size(arguments); + + void* workspace_ptr = device_memory_allocation(workspace_size, device_id); + + cutlass::Status status = implicit_gemm_op.can_implement(arguments); + if (status != cutlass::Status::kSuccess) { + return status; + } + + status = implicit_gemm_op.initialize(arguments, workspace_ptr, stream); + if (status != cutlass::Status::kSuccess) { + return status; + } + + // + // Launch initialized CUTLASS kernel + // + status = implicit_gemm_op(stream); + + return status; +} +""" diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/emit/pytorch.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/emit/pytorch.py new file mode 100644 index 0000000000000000000000000000000000000000..737f5cdf347057d76609f34c25328312cb2173cc --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/emit/pytorch.py @@ -0,0 +1,921 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################################# + +""" +Utilities for generating source for building a PyTorch CUDA extension that using a CUTLASS kernel. +If specified, the extension can be JIT compiled via PyTorch's ``cpp_extension.load`` method. + +Example usage with JIT compilation: + +.. highlight:: python +.. code-block:: python + + plan = cutlass.op.Gemm(element=torch.float32, layout=cutlass.LayoutType.RowMajor) + op = plan.construct() + mod = cutlass.emit.pytorch(op, 'cutlass_gemm', 80, jit=True) + + # Generate inputs for the GEMM + A, B, C = [torch.ones((512, 512)).to('cuda') for _ in range(3)] + + # Run the module + D = mod.run(A, B, C) + + +Example usage without JIT compilation: + +.. highlight:: python +.. code-block:: python + + plan = cutlass.op.Gemm(element=torch.float32, layout=cutlass.LayoutType.RowMajor) + op = plan.construct() + cutlass.emit.pytorch(op, 'cutlass_gemm', 80, jit=False, sourcedir='output') + +After this call, the directory ``output`` contains ``setup.py``, +``cutlass_gemm.cpp``, and ``cutlass_gemm_kernel.cu``. The module can be built from +within ``output`` by running: ``TORCH_CUDA_ARCH_LIST="8.0" python setup.py develop --user``. + +The module can later be used in Python via: + +.. highlight:: python +.. 
code-block:: python + + import torch + import cutlass_gemm + + # Generate inputs for the GEMM + A, B, C = [torch.ones((512, 512)).to('cuda') for _ in range(3)] + + # Run the module + D = cutlass_gemm.run(A, B, C) +""" + +import logging +import os + +from cutlass import CUTLASS_PATH, logger, swizzle, ConvKind, ConvKindNames, DataType +from cutlass.backend.gemm_operation import GemmOperationGrouped, GemmOperationUniversal +from cutlass.backend.conv2d_operation import Conv2dOperation +from cutlass.backend.library import ApiVersion +from cutlass.backend.utils.software import CheckPackages, SubstituteTemplate +from cutlass.emit import common + +torch_available = CheckPackages().check_torch() +if torch_available: + import torch + + +_PYTORCH_CUDA_TEMPLATE = common._CSTYLE_AUTOGEN_COMMENT + """ +#include +#include +#include +#include +#include "cutlass/cutlass.h" +#include "cutlass/util/device_memory.h" + +// helper function allocating the memory +void* device_memory_allocation(size_t size, int device_id=0) { + if (size > 0) { + torch::Device device(torch::kCUDA, device_id); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + torch::TensorOptions options = torch::TensorOptions().dtype(torch::kI8).device(device); + at::Tensor device_tensor = torch::empty({(long)size,}, options); + return reinterpret_cast(device_tensor.data_ptr()); + } else { + return nullptr; + } +} + +${includes} +${declaration} +${impl} +""" + +_PYTORCH_GEMM_CPP_TEMPLATE = common._CSTYLE_AUTOGEN_COMMENT + """ +#include +#include +#include + +// CUDA forward declarations +at::Tensor ${name}_kernel(const at::Tensor& A, const at::Tensor& B, at::optional C=at::nullopt, float alpha=1.f, float beta=0.f); + +// C++ interface +at::Tensor ${name}(const at::Tensor& A, const at::Tensor& B, at::optional C=at::nullopt, float alpha=1.f, float beta=0.f) { + return ${name}_kernel(A, B, C, alpha, beta); +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("run", py::overload_cast, float, float>(&${name}), py::arg("A"), py::arg("B"), py::arg("C") = nullptr, py::arg("alpha") = 1.f, py::arg("beta") = 0.f); +} +""" + +_PYTORCH_GROUPED_GEMM_CPP_TEMPLATE = common._CSTYLE_AUTOGEN_COMMENT + """ +#include +#include +#include + +// CUDA forward declarations +std::vector ${name}_kernel(const std::vector& A, const std::vector& B, at::optional> C=at::nullopt, float alpha=1.f, float beta=0.f); + +// C++ interface +std::vector ${name}(const std::vector& A, const std::vector& B, at::optional> C=at::nullopt, float alpha=1.f, float beta=0.f) { + return ${name}_kernel(A, B, C, alpha, beta); +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("run", py::overload_cast&, const std::vector&, at::optional>, float, float>(&${name}), + py::arg("A"), py::arg("B"), py::arg("C") = nullptr, py::arg("alpha") = 1.f, py::arg("beta") = 0.f); +} +""" + +_PYTORCH_CONV2D_FPROP_CPP_TEMPLATE = common._CSTYLE_AUTOGEN_COMMENT + """ +#include +#include +#include + +// CUDA forward declarations +at::Tensor ${name}_kernel( + const at::Tensor& A, const at::Tensor& B, at::optional C=at::nullopt, + std::tuple stride={1, 1}, std::tuple padding={0, 0}, std::tuple dilation={1, 1}, + float alpha=1.f, float beta=0.f, + std::string split_k_mode="serial", int split_k_slices=1); + +// C++ interface +at::Tensor ${name}( + const at::Tensor& A, const at::Tensor& B, at::optional C=at::nullopt, + std::tuple stride={1, 1}, std::tuple padding={0, 0}, std::tuple dilation={1, 1}, + float alpha=1.f, float beta=0.f, + std::string split_k_mode="serial", int split_k_slices=1) { + return 
${name}_kernel(A, B, C, stride, padding, dilation, alpha, beta, split_k_mode, split_k_slices); +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("run", + py::overload_cast< + const at::Tensor&, const at::Tensor&, at::optional, + std::tuple, std::tuple, std::tuple, float, float, std::string, int>( + &${name}), py::arg("A"), py::arg("B"), py::arg("C") = nullptr, + py::arg("stride") = std::make_tuple(1, 1), py::arg("padding") = std::make_tuple(1, 1), py::arg("dilation") = std::make_tuple(1, 1), + py::arg("alpha") = 1.f, py::arg("beta") = 0.f, + py::arg("split_k_mode") = "serial", py::arg("split_k_slices") = 1); +} +""" + +_PYTORCH_CONV2D_GRAD_CPP_TEMPLATE = common._CSTYLE_AUTOGEN_COMMENT + """ +#include +#include +#include + +// CUDA forward declarations +at::Tensor ${name}_kernel( + std::tuple result_size, const at::Tensor& A, const at::Tensor& B, at::optional C=at::nullopt, + std::tuple stride={1, 1}, std::tuple padding={0, 0}, std::tuple dilation={1, 1}, + float alpha=1.f, float beta=0.f, + std::string split_k_mode="serial", int split_k_slices=1); + +// C++ interface +at::Tensor ${name}( + std::tuple result_size, const at::Tensor& A, const at::Tensor& B, at::optional C=at::nullopt, + std::tuple stride={1, 1}, std::tuple padding={0, 0}, std::tuple dilation={1, 1}, + float alpha=1.f, float beta=0.f, + std::string split_k_mode="serial", int split_k_slices=1) { + return ${name}_kernel(result_size, A, B, C, stride, padding, dilation, alpha, beta, split_k_mode, split_k_slices); +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("run", + py::overload_cast< + std::tuple, const at::Tensor&, const at::Tensor&, at::optional, + std::tuple, std::tuple, std::tuple, float, float, std::string, int>( + &${name}), py::arg("result_size"), py::arg("A"), py::arg("B"), py::arg("C") = nullptr, + py::arg("stride") = std::make_tuple(1, 1), py::arg("padding") = std::make_tuple(1, 1), py::arg("dilation") = std::make_tuple(1, 1), + py::arg("alpha") = 1.f, py::arg("beta") = 0.f, + py::arg("split_k_mode") = "serial", py::arg("split_k_slices") = 1); +} +""" + +_PYTORCH_GEMM_INCLUDES = { + ApiVersion.v2x: """ +#include "cutlass/gemm/device/gemm_universal.h" +""", + ApiVersion.v3x: """ +#include "cutlass/gemm/device/gemm_universal_adapter.h" +#include "cutlass/gemm/collective/collective_builder.hpp" +#include "cutlass/gemm/device/gemm_universal_adapter.h" +#include "cutlass/gemm/kernel/gemm_universal.hpp" +#include "cutlass/epilogue/collective/default_epilogue.hpp" +#include "cutlass/util/packed_stride.hpp" +""", +} + +_PYTORCH_GROUPED_GEMM_INCLUDES = """ +#include "cutlass/gemm/kernel/default_gemm_grouped.h" +#include "cutlass/gemm/device/gemm_grouped.h" +""" + +_PYTORCH_CONV2D_INCLUDES = """ +#include "cutlass/conv/kernel/default_conv2d_fprop.h" +#include "cutlass/conv/kernel/default_conv2d_dgrad.h" +#include "cutlass/conv/kernel/default_conv2d_wgrad.h" +#include "cutlass/conv/device/implicit_gemm_convolution.h" +""" + +_CUTLASS_TYPE_TO_TORCH_TYPE = { + DataType.f16: "torch::kF16", + DataType.f32: "torch::kF32", + DataType.f64: "torch::kF64", + DataType.s8: "torch::I8", + DataType.s32: "torch::I32", +} + +_PYTORCH_GEMM_IMPL_TEMPLATE_2x = ( + common._CUTLASS_KERNEL_RUN_GEMM_2x + + """ +at::Tensor ${name}_kernel(const at::Tensor& A, const at::Tensor& B, at::optional C, float alpha, float beta) { + int M = A.size(0); + int N = B.size(1); + int K = A.size(1); + + typename DeviceKernel::ElementC* ptrC = (C == at::nullopt) ? 
+ nullptr : + reinterpret_cast(C->contiguous().data_ptr()); + at::Tensor D = B.new_empty({M, N}, ${torch_type_C}); + + cutlass::Status status = ${name}_kernel_run(M, N, K, + reinterpret_cast(A.contiguous().data_ptr()), + reinterpret_cast(B.contiguous().data_ptr()), + ptrC, + reinterpret_cast(D.contiguous().data_ptr()), + ElementCompute(alpha), ElementCompute(beta)); + + TORCH_CHECK(status == cutlass::Status::kSuccess, "CUTLASS kernel failed"); + return D; +} +""" +) + +_PYTORCH_GEMM_IMPL_TEMPLATE_3x = ( + common._CUTLASS_KERNEL_RUN_GEMM_3x + + """ +bool hw_info_queried = false; +cutlass::KernelHardwareInfo hw_info; + +at::Tensor ${name}_kernel(const at::Tensor& A, const at::Tensor& B, at::optional C, float alpha, float beta) { + int M = A.size(0); + int N = B.size(1); + int K = A.size(1); + int L = 1; + + // Query hardware info if we haven't already + if (!hw_info_queried) { + hw_info.device_id = 0; + hw_info.sm_count = cutlass::KernelHardwareInfo::query_device_multiprocessor_count(hw_info.device_id); + } + + typename DeviceKernel::ElementC* ptrC = (C == at::nullopt) ? + nullptr : + reinterpret_cast(C->contiguous().data_ptr()); + at::Tensor D = B.new_empty({M, N}, ${torch_type_C}); + + cutlass::Status status = ${name}_kernel_run(M, N, K, L, + reinterpret_cast(A.contiguous().data_ptr()), + reinterpret_cast(B.contiguous().data_ptr()), + ptrC, + reinterpret_cast(D.contiguous().data_ptr()), + ElementCompute(alpha), ElementCompute(beta), + hw_info); + + TORCH_CHECK(status == cutlass::Status::kSuccess, "CUTLASS kernel failed"); + return D; +} +""" +) + + +_PYTORCH_GROUPED_GEMM_IMPL_TEMPLATE = ( + common._CUTLASS_KERNEL_RUN_GROUPED_GEMM_2x + + """ +std::vector ${name}_kernel(const std::vector& A, const std::vector& B, at::optional> C, float alpha, float beta) { + size_t num = A.size(); + + // To avoid performing many small cudaMallocs and host-to-device copies, + // we serialize the grouped GEMM arguments on the host, allocate one + // large chunk of device memory, and perform a single cudaMemcpy to + // copy the host data to the device. Allocation overheads could be + // avoided by using a memory pool. + + // Calculate the total size of the data to be copied from host to device + size_t total_size = sizeof(cutlass::gemm::GemmCoord) + + sizeof(DeviceKernel::ElementA*) + + sizeof(DeviceKernel::ElementB*) + + sizeof(DeviceKernel::ElementC*) + + sizeof(DeviceKernel::ElementC*) + + sizeof(int64_t) + + sizeof(int64_t) + + sizeof(int64_t); + total_size *= num; + + // num * sizeof(cutlass::gemm::GemmCoord) may leave one at a non-multiple + // of sizeof(DeviceKernel::ElementA*) (which will be 64 on a 64-bit system). + // To ensure that we don't end up having misaligned loads in the kernel, + // we pad to the nearest multiple of 8. + // + // Note that, even on a 32-bit system (for which sizeof(X*) will not equal + // sizeof(int64_t)), only padding between the list of GemmCoords and the + // list of ptr_As is sufficient because the set of four equal-length lists of pointers + // (A*, B*, C*, D*) will ensure that the first list of int64_ts will always + // start on a multiple of 8. 
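+    // For instance, assuming sizeof(cutlass::gemm::GemmCoord) == 12 (three ints), num = 3 gives a 36-byte GemmCoord block and total_size % 8 == 4, so padding == 4 brings the pointer lists that follow back to an 8-byte boundary.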
+ int64_t padding = 8 - (total_size % 8); + total_size += padding; + + uint8_t* host_data = new uint8_t[total_size]; + cutlass::DeviceAllocation device_data(total_size); + + uint8_t* start = host_data; + cutlass::gemm::GemmCoord* problem_sizes_host = reinterpret_cast(start); + + // Apply the padding after the list of GemmCoords + start += num * sizeof(cutlass::gemm::GemmCoord) + padding; + + int64_t ptr_A_offset = start - host_data; + DeviceKernel::ElementA** ptr_A_host = reinterpret_cast(start); + start += num * sizeof(DeviceKernel::ElementA*); + + int64_t ptr_B_offset = start - host_data; + DeviceKernel::ElementB** ptr_B_host = reinterpret_cast(start); + start += num * sizeof(DeviceKernel::ElementB*); + + int64_t ptr_C_offset = start - host_data; + DeviceKernel::ElementC** ptr_C_host = reinterpret_cast(start); + start += num * sizeof(DeviceKernel::ElementC*); + + int64_t ptr_D_offset = start - host_data; + DeviceKernel::ElementC** ptr_D_host = reinterpret_cast(start); + start += num * sizeof(DeviceKernel::ElementC*); + + int64_t lda_offset = start - host_data; + int64_t* lda_host = reinterpret_cast(start); + start += num * sizeof(int64_t); + + int64_t ldb_offset = start - host_data; + int64_t* ldb_host = reinterpret_cast(start); + start += num * sizeof(int64_t); + + int64_t ldc_offset = start - host_data; + int64_t* ldc_host = reinterpret_cast(start); + start += num * sizeof(int64_t); + + std::vector D(num); + + bool need_C = (C != at::nullopt) && (beta != 0.f); + for (size_t i = 0; i < num; ++i) { + int M = A[i].size(0); + int N = B[i].size(1); + int K = A[i].size(1); + *(problem_sizes_host + i) = {M, N, K}; + *(ptr_A_host + i) = reinterpret_cast(A[i].contiguous().data_ptr()); + *(ptr_B_host + i) = reinterpret_cast(B[i].contiguous().data_ptr()); + + if (need_C) { + *(ptr_C_host + i) = reinterpret_cast(C->at(i).contiguous().data_ptr()); + } + else { + *(ptr_C_host + i) = nullptr; + } + + D[i] = B[i].new_empty({M, N}, ${torch_type_C}); + *(ptr_D_host + i) = reinterpret_cast(D[i].contiguous().data_ptr()); + + *(lda_host + i) = DeviceKernel::LayoutA::packed({M, K}).stride(0); + *(ldb_host + i) = DeviceKernel::LayoutB::packed({K, N}).stride(0); + *(ldc_host + i) = DeviceKernel::LayoutC::packed({M, N}).stride(0); + } + + device_data.copy_from_host(host_data); + + cutlass::Status status = ${name}_kernel_run( + num, + reinterpret_cast(device_data.get()), + reinterpret_cast(device_data.get() + ptr_A_offset), + reinterpret_cast(device_data.get() + ptr_B_offset), + reinterpret_cast(device_data.get() + ptr_C_offset), + reinterpret_cast(device_data.get() + ptr_D_offset), + reinterpret_cast(device_data.get() + lda_offset), + reinterpret_cast(device_data.get() + ldb_offset), + reinterpret_cast(device_data.get() + ldc_offset), + reinterpret_cast(device_data.get() + ldc_offset), + ElementCompute(alpha), ElementCompute(beta)); + + delete[] host_data; + + TORCH_CHECK(status == cutlass::Status::kSuccess, "CUTLASS kernel failed"); + return D; +} +""" +) + +_PYTORCH_CONV2D_IMPL_TEMPLATE_2x = """ + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + cutlass::Status status = ${name}_kernel_run( + &problem_size, + reinterpret_cast(A.data_ptr()), + reinterpret_cast(B.data_ptr()), + ptrC, + reinterpret_cast(D.data_ptr()), + alpha, beta, + split_k_mode, stream, B.device().index()); + + TORCH_CHECK(status == cutlass::Status::kSuccess, "CUTLASS kernel failed"); + return D; +} +""" + +_PYTORCH_CONV2D_FPROP_IMPL_TEMPLATE_2x = ( + common._CUTLASS_KERNEL_RUN_CONV2D_2x + + """ +at::Tensor ${name}_kernel(const 
at::Tensor& A, const at::Tensor& B, at::optional C=at::nullopt, + std::tuple stride={1, 1}, std::tuple padding={0, 0}, std::tuple dilation={1, 1}, + float alpha=1.f, float beta=0.f, std::string split_k_mode="serial", int split_k_slices=1) { + int N, H, W, C_, K, R, S, P, Q; + N = A.size(0); + C_ = A.size(1); + H = A.size(2); + W = A.size(3); + + K = B.size(0); + R = B.size(2); + S = B.size(3); + + cutlass::conv::Conv2dProblemSize problem_size( + cutlass::Tensor4DCoord(N, H, W, C_), + cutlass::Tensor4DCoord(K, R, S, C_), + cutlass::Tensor4DCoord(std::get<0>(padding), std::get<0>(padding), std::get<1>(padding), std::get<1>(padding)), + cutlass::MatrixCoord(std::get<0>(stride), std::get<1>(stride)), + cutlass::MatrixCoord(std::get<0>(dilation), std::get<1>(dilation)), + cutlass::conv::Mode::kCrossCorrelation, + split_k_slices + ); + + P = problem_size.P; + Q = problem_size.Q; + + typename UnderlyingKernel::ElementC* ptrC = (C == at::nullopt) ? + nullptr : + reinterpret_cast(C->data_ptr()); + + torch::TensorOptions options = torch::TensorOptions().dtype(${torch_type_C}).device(B.device()).memory_format(at::MemoryFormat::ChannelsLast); + at::Tensor D = torch::zeros({N, K, P, Q}, options); +""" + _PYTORCH_CONV2D_IMPL_TEMPLATE_2x +) + + +_PYTORCH_CONV2D_DGRAD_IMPL_TEMPLATE_2x = ( + common._CUTLASS_KERNEL_RUN_CONV2D_2x + + """ +at::Tensor ${name}_kernel(std::tuple input_size, const at::Tensor& A, const at::Tensor& B, at::optional C=at::nullopt, + std::tuple stride={1, 1}, std::tuple padding={0, 0}, std::tuple dilation={1, 1}, float alpha=1.f, float beta=0.f, + std::string split_k_mode="serial", int split_k_slices=1) { + int N, H, W, C_, K, R, S; + N = std::get<0>(input_size); + C_ = std::get<1>(input_size); + H = std::get<2>(input_size); + W = std::get<3>(input_size); + + K = B.size(0); + R = B.size(2); + S = B.size(3); + + cutlass::conv::Conv2dProblemSize problem_size( + cutlass::Tensor4DCoord(N, H, W, C_), + cutlass::Tensor4DCoord(K, R, S, C_), + cutlass::Tensor4DCoord(std::get<0>(padding), std::get<0>(padding), std::get<1>(padding), std::get<1>(padding)), + cutlass::MatrixCoord(std::get<0>(stride), std::get<1>(stride)), + cutlass::MatrixCoord(std::get<0>(dilation), std::get<1>(dilation)), + cutlass::conv::Mode::kCrossCorrelation, + split_k_slices + ); + + typename UnderlyingKernel::ElementC* ptrC = (C == at::nullopt) ? 
+ nullptr : + reinterpret_cast(C->data_ptr()); + + torch::TensorOptions options = torch::TensorOptions().dtype(${torch_type_C}).device(B.device()).memory_format(at::MemoryFormat::ChannelsLast); + at::Tensor D = torch::empty({N, C_, H, W}, options); +""" + _PYTORCH_CONV2D_IMPL_TEMPLATE_2x +) + + +_PYTORCH_CONV2D_WGRAD_IMPL_TEMPLATE_2x = ( + common._CUTLASS_KERNEL_RUN_CONV2D_2x + + """ +at::Tensor ${name}_kernel(std::tuple weight_size, const at::Tensor& A, const at::Tensor& B, at::optional C=at::nullopt, + std::tuple stride={1, 1}, std::tuple padding={0, 0}, std::tuple dilation={1, 1}, float alpha=1.f, float beta=0.f, + std::string split_k_mode="serial", int split_k_slices=1) { + int N, H, W, C_, K, R, S; + K = std::get<0>(weight_size); + C_ = std::get<1>(weight_size); + R = std::get<2>(weight_size); + S = std::get<3>(weight_size); + + N = B.size(0); + H = B.size(2); + W = B.size(3); + + cutlass::conv::Conv2dProblemSize problem_size( + cutlass::Tensor4DCoord(N, H, W, C_), + cutlass::Tensor4DCoord(K, R, S, C_), + cutlass::Tensor4DCoord(std::get<0>(padding), std::get<0>(padding), std::get<1>(padding), std::get<1>(padding)), + cutlass::MatrixCoord(std::get<0>(stride), std::get<1>(stride)), + cutlass::MatrixCoord(std::get<0>(dilation), std::get<1>(dilation)), + cutlass::conv::Mode::kCrossCorrelation, + split_k_slices + ); + + typename UnderlyingKernel::ElementC* ptrC = (C == at::nullopt) ? + nullptr : + reinterpret_cast(C->data_ptr()); + + torch::TensorOptions options = torch::TensorOptions().dtype(${torch_type_C}).device(B.device()).memory_format(at::MemoryFormat::ChannelsLast); + at::Tensor D = torch::empty({K, C_, R, S}, options); +""" + _PYTORCH_CONV2D_IMPL_TEMPLATE_2x +) + + +_PYTORCH_SETUP_PY = common._PYSTYLE_AUTOGEN_COMMENT + """ +from setuptools import setup +from torch.utils.cpp_extension import BuildExtension, CUDAExtension + +setup( + name='${name}', + ext_modules=[ + CUDAExtension('${name}', [ + '${name}.cpp', + '${name}_kernel.cu', + ], + include_dirs=['${cutlass_path}/include', '${cutlass_path}/tools/util/include'], + extra_compile_args=['-std=c++17'] + ), + ], + cmdclass={ + 'build_ext': BuildExtension + }) + +""" + + +def _generate_setup(name: str, sourcedir: str): + """ + Generates a setup.py file for the extension + + :param name: name of the module to generate + :type name: str + :param sourcedir: directory to which generated source files should be written + :type sourcedir: str + """ + setup_py_file = os.path.join(sourcedir, "setup.py") + setup_source = SubstituteTemplate( + _PYTORCH_SETUP_PY, {"name": name, "cutlass_path": CUTLASS_PATH} + ) + with open(setup_py_file, "w") as outfile: + outfile.write(setup_source) + + +class _ArchListSetter: + """ + Utility context manager for temporarily setting the value of the ``TORCH_CUDA_ARCH_LIST`` + environment variable when building a PyTorch CUDA module. + + ``TORCH_CUDA_ARCH_LIST`` is a space-delmited list of compute capabilites for which a PyTorch + CUDA module should be compiled. + + For example, ``TORCH_CUDA_ARCH_LIST="7.0 8.0"`` would result in the inclusion of + ``-gencode=arch=compute_70,code=sm_70`` and ``-gencode=arch=compute_80,code=sm_80`` in the + compilation of the module. + + This utility wraps the building of a PyTorch CUDA module with a setting of this environment + variable according to the current compute capability being targetted. + + Example usage: + + .. highlight:: python + .. 
code-block:: python + + # Temporarily set TORCH_CUDA_ARCH_LIST="8.0" + with _ArchListSetter(80): + # Perform JIT compilation and loading of the module + mod = torch.utils.cpp_extension.load(...) + + :param cc: compute capability + :type cc: int + """ + + _TORCH_CUDA_ARCH_LIST = "TORCH_CUDA_ARCH_LIST" + + def __init__(self, cc: int): + self.cc_str = ".".join(list(str(cc))) + + def __enter__(self): + """ + Saves the old value of TORCH_CUDA_ARCH_LIST and reset it to the new value based on ``cc`` + """ + self.old_arch_list = os.getenv(_ArchListSetter._TORCH_CUDA_ARCH_LIST) + os.environ[_ArchListSetter._TORCH_CUDA_ARCH_LIST] = self.cc_str + + return self + + def __exit__(self, exc_type, exc_val, traceback): + """ + Restores the old value of TORCH_CUDA_ARCH_LIST + """ + os.environ[_ArchListSetter._TORCH_CUDA_ARCH_LIST] = self.old_arch_list + + +def _jit(name: str, cc: int, cpp_file: str, cuda_file: str): + """ + JIT compiles and loads a PyTorch CUDA extension. + + :param name: name of the module to generate + :type name: str + :param cc: compute capability of the device the module should target + :type cc: int + :param cpp_file: path to file containing extension's C++ interface + :type cpp_file: str + :param cuda_file: path to file containing extension's CUDA interface + :type cuda_file: str + + :return: loaded PyTorch module + """ + + from torch.utils.cpp_extension import load + + extra_cuda_cflags = ["-std=c++17"] + if cc == 90: + # PyTorch does not currently add the sm_90a target when compute capability + # 9.0 is set within TORCH_CUDA_ARCH_LIST. Thus, we manually add the sm_90a target. + extra_cuda_cflags.append("-gencode=arch=compute_90a,code=sm_90a") + + with _ArchListSetter(cc): + jitmodule = load( + name, + [cpp_file, cuda_file], + extra_cuda_cflags=extra_cuda_cflags, + extra_include_paths=[ + os.path.join(CUTLASS_PATH, "include"), + os.path.join(CUTLASS_PATH, "tools/util/include"), + ], + verbose=(logger.level == logging.DEBUG) + ) + return jitmodule + + +def _pytorch_gemm(op, name: str, cc: int, jit: bool = False, sourcedir: str = ""): + """ + Generates source for building a PyTorch CUDA module that leverages the CUTLASS GEMM + specified by ``op``. If the ``jit`` parameter is set to true, the module is just-in-time + compiled, loaded, and returned. 
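+
+    A minimal usage sketch (illustrative; assumes ``plan`` is a ``cutlass.op.Gemm``
+    whose ``construct()`` yields the ``GemmOperationUniversal`` passed in as ``op``):
+
+    .. highlight:: python
+    .. code-block:: python
+
+        import torch
+        import cutlass
+
+        plan = cutlass.op.Gemm(element=torch.float32, layout=cutlass.LayoutType.RowMajor)
+        op = plan.construct()
+
+        # Emit the extension sources, JIT-compile them, and call the bound entry point
+        mod = cutlass.emit.pytorch(op, name="cutlass_gemm", cc=plan.cc, jit=True, sourcedir="./gen")
+        A = torch.rand((128, 64), dtype=torch.float32, device="cuda")
+        B = torch.rand((64, 256), dtype=torch.float32, device="cuda")
+        D = mod.run(A, B)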
+ + :param op: operation to emit in the module + :param name: name of the module to generate + :type name: str + :param cc: compute capability of the device the module should target + :type cc: int + :param jit: whether the module should be just-in-time compiled + :type jit: bool + :param sourcedir: directory to which generated source files should be written + :type sourcedir: str + + :return: loaded PyTorch module if ``jit=True`` or ``None`` otherwise + """ + if sourcedir != "" and not os.path.isdir(sourcedir): + os.makedirs(sourcedir) + + cuda_file = os.path.join(sourcedir, name + "_kernel.cu") + extra_kw = {} + if op.api == ApiVersion.v3x: + impl_template = _PYTORCH_GEMM_IMPL_TEMPLATE_3x + else: + impl_template = _PYTORCH_GEMM_IMPL_TEMPLATE_2x + if op.swizzling_functor == swizzle.ThreadblockSwizzleStreamK: + extra_kw["args"] = common._CUTLASS_KERNEL_ARGS_2x_STREAM_K + else: + extra_kw["args"] = common._CUTLASS_KERNEL_ARGS_2x + impl_template = ( + _PYTORCH_GEMM_IMPL_TEMPLATE_3x + if op.api == ApiVersion.v3x + else _PYTORCH_GEMM_IMPL_TEMPLATE_2x + ) + cuda_impl = SubstituteTemplate(impl_template, {"name": name, **extra_kw}) + cuda_source = SubstituteTemplate( + _PYTORCH_CUDA_TEMPLATE, + { + "includes": _PYTORCH_GEMM_INCLUDES[op.api], + "declaration": op.rt_module.emit(), + "procedural_name": op.procedural_name(), + "impl": cuda_impl, + "torch_type_C": _CUTLASS_TYPE_TO_TORCH_TYPE[op.C.element], + }, + ) + with open(cuda_file, "w") as outfile: + outfile.write(cuda_source) + + cpp_file = os.path.join(sourcedir, name + ".cpp") + cpp_source = SubstituteTemplate( + _PYTORCH_GEMM_CPP_TEMPLATE, + {"name": name, "description": f"CUTLASS {op.procedural_name()} GEMM"}, + ) + with open(cpp_file, "w") as outfile: + outfile.write(cpp_source) + + _generate_setup(name, sourcedir) + + if jit: + return _jit(name, cc, cpp_file, cuda_file) + + return None + + +def _pytorch_grouped_gemm( + op, name: str, cc: int, jit: bool = False, sourcedir: str = "" +): + """ + Generates source for building a PyTorch CUDA module that leverages the CUTLASS grouped GEMM + specified by ``op``. If the ``jit`` parameter is set to true, the module is just-in-time + compiled, loaded, and returned. 
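+
+    Usage sketch (illustrative; each index across the ``As``/``Bs`` lists is an
+    independent GEMM problem, and a list of result tensors is returned):
+
+    .. highlight:: python
+    .. code-block:: python
+
+        mod = cutlass.emit.pytorch(op, name="cutlass_grouped_gemm", cc=80, jit=True, sourcedir="./gen")
+        As = [torch.rand((64, 32), dtype=torch.float16, device="cuda"),
+              torch.rand((128, 16), dtype=torch.float16, device="cuda")]
+        Bs = [torch.rand((32, 96), dtype=torch.float16, device="cuda"),
+              torch.rand((16, 48), dtype=torch.float16, device="cuda")]
+        Ds = mod.run(As, Bs)  # Ds[i] has shape (As[i].shape[0], Bs[i].shape[1])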
+ + :param op: operation to emit in the module + :param name: name of the module to generate + :type name: str + :param cc: compute capability of the device the module should target + :type cc: int + :param jit: whether the module should be just-in-time compiled + :type jit: bool + :param sourcedir: directory to which generated source files should be written + :type sourcedir: str + + :return: loaded PyTorch module if ``jit=True`` or ``None`` otherwise + """ + if op.api != ApiVersion.v2x: + raise Exception("Grouped GEMM is currently only supported for CUTLASS 2.x") + + if sourcedir != "" and not os.path.isdir(sourcedir): + os.makedirs(sourcedir) + + cuda_file = os.path.join(sourcedir, name + "_kernel.cu") + cuda_impl = SubstituteTemplate(_PYTORCH_GROUPED_GEMM_IMPL_TEMPLATE, {"name": name}) + cuda_source = SubstituteTemplate( + _PYTORCH_CUDA_TEMPLATE, + { + "includes": _PYTORCH_GROUPED_GEMM_INCLUDES, + "declaration": op.rt_module.emit(), + "procedural_name": op.procedural_name(), + "impl": cuda_impl, + "torch_type_C": _CUTLASS_TYPE_TO_TORCH_TYPE[op.C.element], + }, + ) + with open(cuda_file, "w") as outfile: + outfile.write(cuda_source) + + cpp_file = os.path.join(sourcedir, name + ".cpp") + cpp_source = SubstituteTemplate( + _PYTORCH_GROUPED_GEMM_CPP_TEMPLATE, + {"name": name, "description": f"CUTLASS {op.procedural_name()} grouped GEMM"}, + ) + with open(cpp_file, "w") as outfile: + outfile.write(cpp_source) + + _generate_setup(name, sourcedir) + + if jit: + return _jit(name, cc, cpp_file, cuda_file) + + return None + + +def _pytorch_conv2d(op, name: str, cc: int, jit: bool = False, sourcedir: str = ""): + """ + Generates source for building a PyTorch CUDA module that leverages the CUTLASS Conv2d + specified by ``op``. If the ``jit`` parameter is set to true, the module is just-in-time + compiled, loaded, and returned. + + :param op: operation to emit in the module + :param name: name of the module to generate + :type name: str + :param cc: compute capability of the device the module should target + :type cc: int + :param jit: whether the module should be just-in-time compiled + :type jit: bool + :param sourcedir: directory to which generated source files should be written + :type sourcedir: str + + Note that the when conv kind is `dgrad` or `wgrad`, the size of the input `(N, C, H, W)` or + weight `(K, C, R, S)` should be provided. This is because there are multiple valid solutions + for H/W/R/S given the same P/Q. 
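+
+    For example (illustrative), a ``dgrad`` module emitted here takes the original
+    input size as its first argument, matching the ``result_size`` parameter of the
+    generated binding:
+
+    .. highlight:: python
+    .. code-block:: python
+
+        # grad_output and weight are channels-last CUDA tensors
+        grad_input = mod.run((N, C, H, W), grad_output, weight,
+                             stride=(1, 1), padding=(1, 1), dilation=(1, 1))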
+ + :return: loaded PyTorch module if ``jit=True`` or ``None`` otherwise + """ + if sourcedir != "" and not os.path.isdir(sourcedir): + os.makedirs(sourcedir) + cuda_file = os.path.join(sourcedir, name + "_kernel.cu") + extra_kw = {} + if op.conv_kind == ConvKind.Fprop: + impl_template = _PYTORCH_CONV2D_FPROP_IMPL_TEMPLATE_2x + cpp_template = _PYTORCH_CONV2D_FPROP_CPP_TEMPLATE + elif op.conv_kind == ConvKind.Dgrad: + impl_template = _PYTORCH_CONV2D_DGRAD_IMPL_TEMPLATE_2x + cpp_template = _PYTORCH_CONV2D_GRAD_CPP_TEMPLATE + elif op.conv_kind == ConvKind.Wgrad: + impl_template = _PYTORCH_CONV2D_WGRAD_IMPL_TEMPLATE_2x + cpp_template = _PYTORCH_CONV2D_GRAD_CPP_TEMPLATE + extra_kw["conv_kind_name"] = ConvKindNames[op.conv_kind].capitalize() + extra_kw["torch_type_C"] = _CUTLASS_TYPE_TO_TORCH_TYPE[op.C.element] + cuda_impl = SubstituteTemplate(impl_template, {"name": name, **extra_kw}) + cuda_source = SubstituteTemplate( + _PYTORCH_CUDA_TEMPLATE, + { + "includes": _PYTORCH_CONV2D_INCLUDES, + "declaration": op.rt_module.emit(), + "procedural_name": op.procedural_name(), + "impl": cuda_impl, + "torch_type_C": _CUTLASS_TYPE_TO_TORCH_TYPE[op.C.element], + }, + ) + with open(cuda_file, "w") as outfile: + outfile.write(cuda_source) + + cpp_file = os.path.join(sourcedir, name + ".cpp") + cpp_source = SubstituteTemplate( + cpp_template, + {"name": name, "description": f"CUTLASS {op.procedural_name()} Conv2d"}, + ) + with open(cpp_file, "w") as outfile: + outfile.write(cpp_source) + + _generate_setup(name, sourcedir) + + if jit: + return _jit(name, cc, cpp_file, cuda_file) + + return None + + +def pytorch(op, name: str, cc: int, jit: bool = False, sourcedir: str = ""): + """ + Generates source for building a PyTorch CUDA module that leverages the CUTLASS kernel + specified by ``op``. If the ``jit`` parameter is set to true, the module is just-in-time + compiled, loaded, and returned. + + The result of this method is files within ``sourcedir`` that can be used for building + a PyTorch module. + + :param op: operation to emit in the module + :param name: name of the module to generate + :type name: str + :param cc: compute capability of the device the module should target + :type cc: int + :param jit: whether the module should be just-in-time compiled + :type jit: bool + :param sourcedir: directory to which generated source files should be written + :type sourcedir: str + + :return: loaded PyTorch module (if ``jit=True``) or None + """ + device_op = op.device_op() + if isinstance(op, GemmOperationUniversal): + return _pytorch_gemm(device_op, name, cc, jit, sourcedir) + elif isinstance(op, GemmOperationGrouped): + return _pytorch_grouped_gemm(device_op, name, cc, jit, sourcedir) + elif isinstance(op, Conv2dOperation): + return _pytorch_conv2d(device_op, name, cc, jit, sourcedir) + else: + raise Exception( + f"Operation type {type(op)} is not currently supported for PyTorch emission." + ) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/epilogue/__init__.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/epilogue/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..103cb5fcd83505191e1a7302dfa616808c73d690 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/epilogue/__init__.py @@ -0,0 +1,53 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################################# + +from cutlass.epilogue.epilogue import ( + get_activations, + get_activation_epilogue, + gelu, + hardswish, + identity, + leaky_relu, + relu, + sigmoid, + silu, + tanh, + trace +) + +from cutlass.epilogue.evt_ops import ( + max, + multiply_add, + sum, + permute, + reshape +) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/epilogue/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/epilogue/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0ce1e104513871bfa1b9d11ddd67754f944e048a Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/epilogue/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/epilogue/__pycache__/epilogue.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/epilogue/__pycache__/epilogue.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df8771178cf8d3f8a79c5bf82ac2248eb37b8797 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/epilogue/__pycache__/epilogue.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/epilogue/__pycache__/evt_ops.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/epilogue/__pycache__/evt_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b32d6aea8d1929432957c76372c096a46cc1a0d8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/epilogue/__pycache__/evt_ops.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/epilogue/epilogue.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/epilogue/epilogue.py new file mode 100644 index 0000000000000000000000000000000000000000..d76123f34eb00e674c11784883627a6357b57510 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/epilogue/epilogue.py @@ -0,0 +1,158 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################################# + +""" +Registry of elementwise epilogues + +Elementwise epilogues can be added to many CUTLASS kernels in the CUTLAS Python interface via +code like the following for GEMM: + +.. highlight:: python +.. 
code-block:: python + + plan = cutlass.op.Gemm(element=cutlass.DataType.f32, layout=cutlass.LayoutType.RowMajor) + plan.activation = cutlass.epilogue.relu +""" + +from cutlass.backend import epilogue + + +gelu = epilogue.gelu +hardswish = epilogue.hardswish +identity = epilogue.identity +leaky_relu = epilogue.leaky_relu +relu = epilogue.relu +sigmoid = epilogue.sigmoid +silu = epilogue.silu +tanh = epilogue.tanh + + +_activations = [gelu, hardswish, identity, leaky_relu, relu, sigmoid, silu, tanh] + + +def get_activations() -> list: + """ + Returns a list of available activation functions + + :return: list of available activation functions + :rtype: list + """ + return _activations + + +def get_activation_epilogue( + activation, + element_output, + elements_per_access, + element_accumulator, + element_compute, +): + """ + Return an epilogue corresponding to the activation function, data types, and alignment + used in the kernel + + :param activation: elementwise activation function to use + :param element_output: data type of the output + :param elements_per_access: alignment of operand C of the kernel + :type elements_per_access: int + :param element_accumulator: data type of the accumulated output C + :param element_compute: data type in which compute operations should be performed + + :return: epilogue functor + """ + if activation not in _activations: + raise Exception( + f"Unsupported activation type {activation}. Available activations are: {_activations}" + ) + + if activation == identity: + return epilogue.LinearCombination( + element_output, elements_per_access, element_accumulator, element_compute + ) + else: + return epilogue.LinearCombinationGeneric( + activation, + element_output, + elements_per_access, + element_accumulator, + element_compute, + ) + + +""" +Frontend for EVT that generates epilogue functor through tracing the input function +""" +from cutlass.backend.evt.frontend import PythonASTFrontend + + +def trace(fn, example_tensors, **kwargs): + """ + Trace `fn(**example_tensors)` and generates epilogue visitor + + :param fn: Python callables + :param example_tensors: example inputs for fn + :type example_tensors: dict + + .. hightlight:: python + .. 
code-block:: python + import cutlass.backend.evt + + # Define epilogue function as Python callable + def example_fn(accum, C, alpha, beta, gamma): + D = ((accum + C) * alpha - gamma) / beta + return D + + # Define the example tensors + example_inputs = { + "accum": torch.empty(size=(6, 512, 512), dtype=torch.float16, device="cuda"), + "C": torch.empty(size=(6, 512, 512), dtype=torch.float16, device="cuda"), + "alpha": 1.5, + "beta": 0.5, + "gamma": 2.5, + "D": torch.empty(size=(6, 512, 512), dtype=torch.float16, device="cuda") + } + + # Generate the epilogue functor + epilogue_visitor = cutlass.epilogue.trace(example_fn, example_inputs) + """ + if callable(fn): + class EpilogueFunctor(PythonASTFrontend): + def __init__(self, **kwargs): + super().__init__(**kwargs) + pass + setattr(EpilogueFunctor, "__call__", staticmethod(fn)) + + epilogue_functor = EpilogueFunctor(**kwargs) + epilogue_functor.trace(example_tensors) + return epilogue_functor + else: + raise NotImplementedError("Expect a callable Python function") diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/epilogue/evt_ops.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/epilogue/evt_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..19f79a3dab6565d607620d104eb27fc0183bca1d --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/epilogue/evt_ops.py @@ -0,0 +1,79 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +################################################################################################# + +""" +Collection of builtin functions used for host reference in EVT +""" + +import numpy as np + +from cutlass.backend.utils.software import CheckPackages + +torch_available = CheckPackages().check_torch() +if torch_available: + import torch + + +def multiply_add(x, y, z): + return x * y + z + + +def sum(x, dim): + if isinstance(x, np.ndarray): + return x.sum(axis=tuple(dim)) + elif torch_available and isinstance(x, torch.Tensor): + return torch.sum(x, dim) + + +def max(x, dim): + if isinstance(x, np.ndarray): + return x.max(axis=tuple(dim)) + elif torch_available and isinstance(x, torch.Tensor): + return torch.amax(x, dim) + + +############################################################################## +# Layout manipulate nodes +############################################################################## + +def permute(x, indices: tuple): + if isinstance(x, np.ndarray): + return np.transpose(x, axes=indices) + elif torch_available and isinstance(x, torch.Tensor): + return x.permute(*indices) + + +def reshape(x, new_shape: tuple): + if isinstance(x, np.ndarray): + return np.reshape(x, newshape=new_shape) + elif torch_available and isinstance(x, torch.Tensor): + return x.view(new_shape) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/library_defaults.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/library_defaults.py new file mode 100644 index 0000000000000000000000000000000000000000..ad3e9ba8b34374fe1fe4813bc1852fe411beedbe --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/library_defaults.py @@ -0,0 +1,481 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +################################################################################################# + +""" +Classes containing valid operations for a given compute capability and data types. +""" + +import logging + +from cuda import __version__ +import cutlass_library +from cutlass_library.library import ConvKind, IteratorAlgorithm, StrideSupport, GroupMode + +import cutlass +from cutlass.utils.check import valid_stage_count +from cutlass.utils.datatypes import td_from_profiler_td, td_from_profiler_op + + +_generator_ccs = [50, 60, 61, 70, 75, 80, 90] + +# Strip any additional information from the CUDA version +_cuda_version = __version__.split("rc")[0] + + +class KernelsForDataType: + """ + Container class for keeping track of kernels that correspond to a particular combination + of data types for operands A, B, and accumulator + """ + + def __init__(self, datatype_comb: tuple, layout_comb: tuple): + self.datatype_comb = datatype_comb + self.layout_comb = layout_comb + + # Dictionary mapping from alignment (int) to a list of kernels that fit the alignment + # constraint for the data type combination + self.kernels_by_alignment = {} + + def add(self, operation): + """ + Add an operation to the list of supported kernels + """ + alignment = operation.A.alignment + if alignment not in self.kernels_by_alignment: + self.kernels_by_alignment[alignment] = [] + self.kernels_by_alignment[alignment].append(operation) + + @property + def alignments(self): + """ + Returns an unsorted list of alignments supported by this data type combination + + :return: unsorted list of alignments supported by this data type combination + :rtype: list + """ + return list(self.kernels_by_alignment.keys()) + + @property + def all_operations(self): + """ + Returns a list of all operations supported by this data type combination + + :return: list of all operations supported by this data type combination + :rtype: list + """ + ops = [] + for _, alignment_ops in self.kernels_by_alignment.items(): + ops.extend(alignment_ops) + return ops + + def operations(self, alignment: int): + """ + Returns operations satisfying the alignment constraint indicated by `alignment` + + :param alignment: alignment constraint of operations to return + :type alignment: int + + :return: list of operations + :rtype: list + """ + if alignment not in self.kernels_by_alignment: + raise Exception( + f"No operations of alignment {alignment} found for data type and layout " + f"combination {self.datatype_comb} {self.layout_comb}" + ) + return self.kernels_by_alignment[alignment] + + def find_alignment(self, shape: tuple, layout: cutlass.LayoutType) -> int: + """ + Returns the most preferable alignment for a given shape and layout + + :param shape: extent of each dimension of the tensor + :type shape: tuple + :param layout: layout of the tensor + :type layout: cutlass.LayoutType + + :return: maximum alignment supported by the data type combination and tensor size + :rtype: int + """ + # Determine the leading dimension of the shape + if layout == cutlass.LayoutType.ColumnMajor: + ld = shape[-2] + elif layout == cutlass.LayoutType.RowMajor: + ld = shape[-1] + elif layout == cutlass.LayoutType.TensorNHWC: + ld = shape[-1] + else: + raise Exception(f"Unexpected or unsupported layout {layout}") + + for alignment in sorted(list(self.kernels_by_alignment.keys()), reverse=True): + if ld % alignment == 0: + return alignment + + # Default to alignment of 1 if no others match + return 1 + + def sort(self): + """ + Sorts each list of kernels in 
`kernels_by_alignment` in descending order of threadblock shape + """ + key = lambda op: ( + op.tile_description.threadblock_shape[0] + * op.tile_description.threadblock_shape[1] + * op.tile_description.threadblock_shape[2] + ) + for alignment in self.kernels_by_alignment.keys(): + self.kernels_by_alignment[alignment].sort(key=key, reverse=True) + + +class ArchOptions: + """ + Structure for keeping track of kernels available on a given compute capability + + :param target_cc: compute capability of the device on which kernels will be run + :type target_cc: int + :param kernel_cc: compute capability of the kernels to generate + :type kernel_cc: int + :param operation_kind: type of operation to register + :type operation_kind: cutlass.OperationKind + :param gemm_kinds: types of GEMM operations that can be included + :type gemm_kinds: list + :param allowed_math_operations: types of primitive math operations allowed + :type allowed_math_operations: list + """ + + def __init__( + self, + target_cc: int, + kernel_cc: int, + operation_kind: cutlass.OperationKind, + gemm_kinds: list, + allowed_math_operations: list = [ + cutlass.MathOperation.multiply_add, + cutlass.MathOperation.multiply_add_saturate, + ] + ): + self.cc = kernel_cc + + # Dictionary with following structure: + # Key: OpcodeClass + # Value: Dictionary with the following structure: + # Key: tuple of ((DataType, DataType, DataType), (LayoutType, LayoutType, LayoutType), + # representing ((element_a, element_b, element_accumulator), (layout_a, layout_b)) + # Value: KernelsForDataType + self.operations_by_opclass = {} + self.op_class = None + self.allowed_math_operations = allowed_math_operations + + # Identify the method within CUTLASS generator script that generates kernel + # descriptions for the target CC + generate_function_name = "GenerateSM" + str(kernel_cc) + if not hasattr(cutlass_library.generator, generate_function_name): + cutlass.logger.warning(f"No generator found for architecture {kernel_cc}") + return + generate_function = getattr(cutlass_library.generator, generate_function_name) + + # Initialize a default manifest and populate it with valid kernel descriptions + # for the target CC + args = [ + "--kernels=all", + f"--log-level={logging.getLevelName(cutlass.logger.level)}" + ] + manifest_args = cutlass_library.generator.define_parser().parse_args(args) + manifest = cutlass_library.manifest.Manifest(manifest_args) + generate_function(manifest, _cuda_version) + + if operation_kind not in manifest.operations: + # No kernels generated for this architecture, this could be because the CUDA + # toolkit is insufficient to support operations in this CC + cutlass.logger.warning(f"No operations of type {operation_kind} found for CC {kernel_cc}") + return + + # Only one CC should be returned, given the setup above of calling only the generation scripts + # for a given CC + if len(manifest.operations[operation_kind].keys()) != 1 or kernel_cc not in manifest.operations[operation_kind]: + raise Exception(f"Error finding kernels for SM{kernel_cc}. 
Check that your CUDA toolkit version " + "is sufficient for the architecture in question.") + + # Iterate through the available operations for this operation kind and + # find available opclasses and data types + for name, op_list in manifest.operations[operation_kind][kernel_cc].items(): + for op in op_list: + if operation_kind == cutlass.OperationKind.Gemm: + if op.gemm_kind not in gemm_kinds: + continue + + mi = op.tile_description.math_instruction + if mi.math_operation not in self.allowed_math_operations: + continue + + if op.C.element == cutlass.DataType.void: + # The CUTLASS Python interface currently does not support void-C kernels + continue + + datatype_comb = (mi.element_a, mi.element_b, mi.element_accumulator) + + # Prune operations that don't fit in shared memory + td = td_from_profiler_op(op) + if not valid_stage_count(target_cc, kernel_cc, td)[0]: + continue + + if mi.opcode_class not in self.operations_by_opclass: + self.operations_by_opclass[mi.opcode_class] = {} + + datatype_comb = (mi.element_a, mi.element_b, mi.element_accumulator) + layout_comb = (op.A.layout, op.B.layout) + + # Register TF32 kernels as F32 to enable F32 -> TF32 conversion + TF32 Tensor Core operations + if datatype_comb == (cutlass.DataType.tf32, cutlass.DataType.tf32, cutlass.DataType.f32): + # TF32 kernels only supported on SM80 and beyond + if self.cc < 80: + continue + elif self.cc == 90: + if (op.A.element != cutlass.DataType.f32 + or op.B.element != cutlass.DataType.f32 + or op.C.element != cutlass.DataType.f32): + continue + + datatype_comb = (cutlass.DataType.f32, cutlass.DataType.f32, cutlass.DataType.f32) + + opclass_dict = self.operations_by_opclass[mi.opcode_class] + key = (datatype_comb, layout_comb) + if key not in opclass_dict: + opclass_dict[key] = KernelsForDataType(datatype_comb, layout_comb) + opclass_dict[key].add(op) + + # Set the default opclass to TensorOp, if available. Otherwise default to SIMT + if cutlass.OpcodeClass.TensorOp in self.operations_by_opclass: + self.op_class = cutlass.OpcodeClass.TensorOp + else: + self.op_class = cutlass.OpcodeClass.Simt + + # The profiler's generator may generate only a limited set of combinations of operands for SIMT kernels. + # Here, we generate additional versions via a generic TileDescription. 
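+        # (These hand-registered SIMT kernels use a single conservative tile
+        # description and an alignment of 1, so they act as functional fallbacks
+        # rather than performance-tuned configurations.)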
+ if cutlass.OpcodeClass.Simt not in self.operations_by_opclass: + self.operations_by_opclass[cutlass.OpcodeClass.Simt] = {} + + if operation_kind == cutlass.OperationKind.Gemm: + types = [ + (cutlass.DataType.s8, cutlass.DataType.s8, cutlass.DataType.s8), + (cutlass.DataType.s8, cutlass.DataType.s8, cutlass.DataType.s32), + (cutlass.DataType.f16, cutlass.DataType.f16, cutlass.DataType.f16), + (cutlass.DataType.f16, cutlass.DataType.f16, cutlass.DataType.f32), + (cutlass.DataType.f32, cutlass.DataType.f32, cutlass.DataType.f32), + (cutlass.DataType.f64, cutlass.DataType.f64, cutlass.DataType.f64), + ] + + layouts = [ + (cutlass.LayoutType.RowMajor, cutlass.LayoutType.RowMajor), + (cutlass.LayoutType.RowMajor, cutlass.LayoutType.ColumnMajor), + (cutlass.LayoutType.ColumnMajor, cutlass.LayoutType.RowMajor), + (cutlass.LayoutType.ColumnMajor, cutlass.LayoutType.ColumnMajor), + ] + elif operation_kind == cutlass.OperationKind.Conv2d: + types = [ + (cutlass.DataType.f16, cutlass.DataType.f16, cutlass.DataType.f16), + (cutlass.DataType.f16, cutlass.DataType.f16, cutlass.DataType.f32), + (cutlass.DataType.f32, cutlass.DataType.f32, cutlass.DataType.f32), + (cutlass.DataType.f64, cutlass.DataType.f64, cutlass.DataType.f64), + ] + + layouts = [ + (cutlass.LayoutType.TensorNHWC, cutlass.LayoutType.TensorNHWC), + ] + else: + raise NotImplementedError(f"Operation kind {operation_kind} is currently unsupported.") + + alignment = 1 + epilogue_functor = cutlass.EpilogueFunctor.LinearCombination + swizzling_functor = cutlass.SwizzlingFunctor.Identity8 + for type_comb in types: + for layout_comb in layouts: + comb = (type_comb, layout_comb) + if comb in self.operations_by_opclass[cutlass.OpcodeClass.Simt]: + continue + + A = cutlass.TensorDescription(type_comb[0], layout_comb[0], alignment) + B = cutlass.TensorDescription(type_comb[1], layout_comb[1], alignment) + C = cutlass.TensorDescription(type_comb[2], cutlass.LayoutType.ColumnMajor, alignment) + math_inst = cutlass.MathInstruction( + [1, 1, 1], + type_comb[0], + type_comb[1], + type_comb[2], + cutlass.OpcodeClass.Simt, + cutlass.MathOperation.multiply_add + ) + + td = cutlass.TileDescription( + [128, 128, 8], 2, [4, 2, 1], math_inst, 50, 1024) + + # Prune operations that don't fit in shared memory + if not valid_stage_count(target_cc, kernel_cc, td_from_profiler_td(td))[0]: + continue + + new_kernels = KernelsForDataType(type_comb, layout_comb) + + if operation_kind == cutlass.OperationKind.Gemm: + new_operation = cutlass_library.manifest.GemmOperation( + cutlass.GemmKind.Universal, td.minimum_compute_capability, + td, A, B, C, type_comb[2], epilogue_functor, swizzling_functor) + new_kernels.add(new_operation) + elif operation_kind == cutlass.OperationKind.Conv2d: + for conv_kind in [ConvKind.Fprop, ConvKind.Dgrad, ConvKind.Wgrad]: + new_operation = cutlass_library.manifest.Conv2dOperation( + conv_kind, IteratorAlgorithm.Analytic, td.minimum_compute_capability, td, + A, B, C, type_comb[2], StrideSupport.Strided, epilogue_functor, swizzling_functor, + group_mode=GroupMode.SingleGroup + ) + new_kernels.add(new_operation) + + self.operations_by_opclass[cutlass.OpcodeClass.Simt][comb] = new_kernels + + # Sort all operations + for oc in self.operations_by_opclass.keys(): + for comb in self.operations_by_opclass[oc].keys(): + self.operations_by_opclass[oc][comb].sort() + + def opclass_supports_combination( + self, op_class: cutlass.OpcodeClass, datatype_comb: tuple, layout_comb: tuple + ) -> bool: + """ + Returns whether the provided operation class 
supports the provided data type and layout combination + + :param op_class: operation class to consider + :type op_class: cutlass.OpcodeClass + :param datatype_comb: tuple of data types for (element_A, element_B, element_accumulator) + :type datatype_comb: tuple[cutlass.DataType] + :param layout_comb: tuple of data types for (layout_A, layout_B) + :type layout_comb: tuple[cutlass.LayoutType] + + :return: set of operation classes that support the provided data type and layout combination + :rtype: set + """ + if op_class not in self.operations_by_opclass: + raise Exception(f"Unexpected or unsupported operation class {op_class}") + + return (datatype_comb, layout_comb) in self.operations_by_opclass[op_class] + + def supporting_opclasses( + self, + element_a: cutlass.DataType, + element_b: cutlass.DataType, + element_accumulator: cutlass.DataType, + layout_a: cutlass.LayoutType, + layout_b: cutlass.LayoutType, + ) -> set: + """ + Returns a set of operation classes that support the provided data type combination + + :param element_a: data type of operand A + :type element_a: cutlass.DataType + :param element_b: data type of operand B + :type element_b: cutlass.DataType + :param element_accumulator: data type of accumulator + :type element_accumulator: cutlass.DataType + :param layout_a: layout of operand A + :type layout_a: cutlass.LayoutType + :param layout_b: layout of operand B + :type layout_b: cutlass.LayoutType + + :return: set of operation classes that support the provided data type combination + :rtype: set + """ + supporting_op_classes = set() + datatype_comb = (element_a, element_b, element_accumulator) + layout_comb = (layout_a, layout_b) + + for op_class in self.operations_by_opclass.keys(): + if self.opclass_supports_combination(op_class, datatype_comb, layout_comb): + supporting_op_classes.add(op_class) + return supporting_op_classes + + def operations( + self, + op_class: cutlass.OpcodeClass, + element_a: cutlass.DataType, + element_b: cutlass.DataType, + element_accumulator: cutlass.DataType, + layout_a: cutlass.LayoutType, + layout_b: cutlass.LayoutType, + ) -> KernelsForDataType: + """ + Returns whether the provided operation class supports the provided data type combination + + :param op_class: operation class to consider + :type op_class: cutlass.OpcodeClass + :param element_a: data type of operand A + :type element_a: cutlass.DataType + :param element_b: data type of operand B + :type element_b: cutlass.DataType + :param element_accumulator: data type of accumulator + :type element_accumulator: cutlass.DataType + :param layout_a: layout of operand A + :type layout_a: cutlass.LayoutType + :param layout_b: layout of operand B + :type layout_b: cutlass.LayoutType + + :return: container of kernels by alignment supported by the provided combination of parameters + :rtype: KernelsForDataType + """ + datatype_comb = (element_a, element_b, element_accumulator) + layout_comb = (layout_a, layout_b) + if not self.opclass_supports_combination(op_class, datatype_comb, layout_comb): + raise Exception( + f"Data type layout combination {datatype_comb}, {layout_comb} " + f"is not supported by opcode class {op_class} on CC {self.cc}." 
+ ) + return self.operations_by_opclass[op_class][(datatype_comb, layout_comb)] + + +class OptionRegistry: + """ + Container of all architecture-specific options + + :param target_cc: compute capability of the device on which operations will be run + :type target_cc: int + """ + + def __init__(self, target_cc: int): + self.registry = {} + + gemm_kinds = [cutlass.GemmKind.Universal, cutlass.GemmKind.Universal3x] + operation_kinds = [cutlass.OperationKind.Gemm, cutlass.OperationKind.Conv2d] + # Construct options for each CC + for kernel_cc in _generator_ccs: + self.registry[kernel_cc] = {} + for opkind in operation_kinds: + self.registry[kernel_cc][opkind] = ArchOptions(target_cc, kernel_cc, opkind, gemm_kinds) + + def options_for_cc(self, cc: int, op_kind=cutlass.OperationKind.Gemm) -> ArchOptions: + return self.registry.get(cc, None)[op_kind] diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/op/__init__.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/op/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fb232c54e5cc036a17dcc504d24b8afca3fb62ac --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/op/__init__.py @@ -0,0 +1,36 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +################################################################################################# + +from cutlass.op.conv import Conv2d, Conv2dFprop, Conv2dDgrad, Conv2dWgrad +from cutlass.op.gemm import Gemm +from cutlass.op.gemm_grouped import GroupedGemm +from cutlass.op.op import OperationBase diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/op/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/op/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9bc644df8f5b328d8c45ecb04c365b290c10e606 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/op/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/op/__pycache__/conv.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/op/__pycache__/conv.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52196f1e20637614a51c0be10cc6d9ee95ea33f5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/op/__pycache__/conv.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/op/__pycache__/gemm.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/op/__pycache__/gemm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2542b4455d640ce9f00cfa9e69016716ca6ce8e3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/op/__pycache__/gemm.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/op/__pycache__/gemm_grouped.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/op/__pycache__/gemm_grouped.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31488ce9495f73044b3cbb0e9d15c885d2a83702 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/op/__pycache__/gemm_grouped.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/op/__pycache__/op.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/op/__pycache__/op.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4982a558b28f0d8f79d787353877518662f8ca9e Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/op/__pycache__/op.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/op/conv.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/op/conv.py new file mode 100644 index 0000000000000000000000000000000000000000..396878592561fc7da36621bac8fda43e5b061ffa --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/op/conv.py @@ -0,0 +1,957 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################################# + +""" + Ease-of-use interface for constructing, compiling, and running CONVs + + The ``Conv2d`` interface is meant to allow one to easily instantiate, compile, and run + CONV2D operations in CUTLASS via Python, without specifying many configuration parameters. + Under the hood, the interface will select sensible default parameters for the many template + parameters for CUTLASS CONVs. + + Note: optimal performance is not to be expected from this interface. To achieve optimal + performance, one should specify and tune each configuration parameter. + + The simplest example of using this interface is the following: + + .. highlight:: python + .. code-block:: python + + # A, B, C, and D are torch/numpy/cupy tensor objects + plan = cutlass.op.Conv(A, B, C, D) + plan.run(stride=(1, 1), padding=(0, 0), dilation=(1, 1)) + + One can also use the interface by specifying data types of operands at construction + and using different tensor objects with these data types at runtime: + + .. highlight:: python + .. 
code-block:: python + + # The following is shorthand for: + # cutlass.op.Conv2d(kind="fprop", + # element_A=torch.float32, element_B=torch.float32, + # element_C=torch.float32, element_D=torch.float32, + # element_accumulator=torch.float32) + plan = cutlass.op.Conv2d(kind="fprop", element=torch.float32) + + A0 = torch.rand((128, 256), dtype=torch.float32, device='cuda') + B0 = torch.rand((256, 64), dtype=torch.float32, device='cuda') + C0 = torch.zeros((128, 64), dtype=torch.float32, device='cuda') + D0 = torch.zeros((128, 64), dtype=torch.float32, device='cuda') + plan.run(A0, B0, C0, D0, stride=(1, 1), padding=(0, 0), dilation=(1, 1)) + + A1 = torch.rand((32, 128), dtype=torch.float32, device='cuda') + B1 = torch.rand((128, 256), dtype=torch.float32, device='cuda') + C1 = torch.zeros((32, 256), dtype=torch.float32, device='cuda') + D1 = torch.zeros((32, 256), dtype=torch.float32, device='cuda') + plan.run(A1, B1, C1, D1, stride=(1, 1), padding=(0, 0), dilation=(1, 1)) + + The interface additionally enables one to decouple the compilation of the underlying CUTLASS + kernel from its execution: + + .. highlight:: python + .. code-block:: python + + plan = cutlass.op.Conv2d(kind="fprop", element=np.float32) + + # Do other work... + + plan.run(A0, B0, C0, D0, stride=(1, 1), padding=(0, 0), dilation=(1, 1)) + + # Do other work... + + plan.run(A1, B1, C1, D1, stride=(1, 1), padding=(0, 0), dilation=(1, 1)) + + Elementwise activation functions are easily fused to the convolution via the interface: + + .. highlight:: python + .. code-block:: python + + plan = cutlass.op.Conv2d(kind="fprop", element=np.float32) + plan.activation = cutlass.epilogue.relu + + Operations can also be run asynchronously: + + .. highlight:: python + .. code-block:: python + + plan = cutlass.op.Conv2d(kind="fprop", element=np.float32) + args = plan.run() + + # Do other work... + + args.sync() +""" + +import cutlass +from cutlass import epilogue +from cutlass import ( + ConvKind, + ConvMode, + IteratorAlgorithm, + SplitKMode, + StrideSupport, +) +from cutlass.backend import compiler +from cutlass.backend.conv2d_operation import Conv2dArguments, Conv2dOperation +from cutlass.backend.reduction_operation import ReductionOperation, ReductionArguments +from cutlass.backend.library import TensorDescription, TileDescription +from cutlass.op.op import OperationBase +from cutlass.shape import Conv2DProblemSize, MatrixCoord +from cutlass.utils import check, datatypes + + +class Conv2d(OperationBase): + """ + Constructs a ``Conv2d`` object. + + The convolution kind (fprop, wgrad, dgrad), the data types of operands A, B, and C, + along with the data type of output D and that used for accumulation, are bound to the ``Conv2d`` + object throughout its lifetime -- these are not to be changed after a ``Conv2d`` has been constructed. + + The constructor has optional parameters for flexibly setting these parameters. The following + constructors are equivalent: + + .. highlight:: python + .. code-block:: python + + # Use F32 for A, B, C, D, and accumulation in fprop + + # Use the generic ``element`` parameter to concisely set all data types for operands to the same values. + Conv2d(kind="fprop", element=cutlass.DataType.f32) + + # Explicitly specify the data types to use for A, B, C, and D. + Conv2d(kind="fprop", element_A=cutlass.DataType.f32, element_B=cutlass.DataType.f32, + element_C=cutlass.DataType.f32, element_D=cutlass.DataType.f32) + + # Set the data types and elements from existing tensors.
Note that one can use different tensors when + # executing GEMM via the ``run()`` method than passed in here (though those passed in to ``run()`` must + # have the same data type as those passed in here). + # A, B, C, and D are torch.Tensor objects of type torch.float32 under the channel-last layout + Conv2d(kind="fprop", A=A, B=B, C=C, D=D) + + # Explicitly specify the data type for only some of A, B, C, and D. Unspecified data types will inherit + # those passed in via the generic ``element`` + Conv2d(kind="fprop", element_A=cutlass.DataType.f32, element_accumulator=cutlass.DataType.f32, + element=cutlass.DataType.f32) + + The order of precedence for the setting of the data type for a given operand/output is as follows: + 1) If the tensor type is specified (e.g., ``A``), use the data type inferred from this tensor + 2) Otherwise, if the data type (e.g., ``element_A``) is specified, use those + 3) Otherwise, use the generic values (e.g., ``element``) + + :param kind: the convolution kind (i.e. fprop, wgrad, and dgrad) + :type kind: str + :param A: tensor representing data type of operand A + :param B: tensor representing data type of operand B + :param C: tensor representing data type of operand C + :param D: tensor representing data type of operand D + :param alpha: scalar paramter alpha from GEMM computation that scales the product of operands A and B + :param beta: scalar parameter beta from GEMM operation that scales operand C + :param element: generic data type to be used for operands A, B, C, D, as well as the accumulation data type + :type element: cutlass.DataType + :param element_A: data type to be used for operand A + :type element_A: cutlass.DataType + :param element_B: data type to be used for operand B + :type element_B: cutlass.DataType + :param element_C: data type to be used for operand C + :type element_C: cutlass.DataType + :param element_D: data type to be used for operand D + :type element_D: cutlass.DataType + :param element_accumulator: data type to be used in accumulation of the product of operands A and B + :type element_accumulator: cutlass.DataType + :param cc: compute capability of device for which kernels should be compiled. For example, if running on H100, this should be set to 90 + :type cc: int + :param kernel_cc: compute capability of kernels to generate. For example, if running on SM90, but desiring to use a CUTLASS 2.x-style Ampere kernel, this should be set to 80 + :type kernel_cc: int + """ + def __init__( + self, kind="fprop", + A=None, B=None, C=None, D=None, alpha=1.0, beta=0.0, + element=None, + element_A=None, element_B=None, element_C=None, element_D=None, + element_accumulator=None, + cc: int = None, kernel_cc: int = None + ): + super().__init__(cc=cc, kernel_cc=kernel_cc, operation_kind=cutlass.OperationKind.Conv2d) + # Verify the kernel cc + if self.current_cc == 90: + # The Conv2d kernel on Hopper (SM90) is currently unsupported + # Revert to use SM80-tagged kernels + cutlass.logger.warning("Reverting to using SM80-tagged kernel. Opclass may change.") + self.specified_kernel_cc = 80 + self._reset_options(80) + + # The arch is used in testing + self.arch = self.current_cc + self.name = "conv2d" + kind + + # The convolution kind. 
(concept: cutlass_library.library.ConvKind) + self.conv_kind = datatypes.getattr_enum(ConvKind, kind) + + # The element types (concept: cutlass library types) of A, B, C, and D + elements = [] + layouts = [] + + # Complete the data types based on user-provided arguments + for elt, tens, name in zip([element_A, element_B, element_C, element_D], + [A, B, C, D], + ["A", "B", "C", "D"]): + if elt is not None and tens is not None: + raise Exception(f'Must not specify both element_{name} and tensor {name}') + if elt is None and tens is None and element is None: + raise Exception(f'Must specify one of element_{name}, tensor {name}, or generic element.') + + elt_to_set = None + lay_to_set = None + + if tens is not None: + elt_to_set, _ = datatypes.get_datatype_and_layout(tens) + else: + elt_to_set = elt if elt is not None else element + + assert elt_to_set is not None + + # Currently we only support layout TensorNHWC + lay_to_set = cutlass.LayoutType.TensorNHWC + elements.append(datatypes.library_type(elt_to_set)) + layouts.append(lay_to_set) + + self._element_a, self._element_b, self._element_c, self._element_d = elements + self._layout_a, self._layout_b, self._layout_c, self._layout_d = layouts + + self.A, self.B, self.C, self.D, self.alpha, self.beta = A, B, C, D, alpha, beta + + if element_accumulator is None: + self._element_accumulator = self._element_c + else: + self._element_accumulator = datatypes.library_type(element_accumulator) + + # Default inputs if none is supplied in run() + self.A = A + self.B = B + self.C = C + self.D = D + + self.alpha = alpha + self.beta = beta + + # We only specify the stride of the swizzling functor here + # The actual swizzling functor is determined in run based on conv_kind and stride + self._swizzling_stride = 1 + + # Arguments that will be set to default value in _reset_operations + # The default tile_description and op_class are fetched from manifest of cutlass library + self._tile_description = None + self.op_class = None + # The default identity epilogue will be created + self.epilogue_functor = None + + self._reset_operations() + + # Arguments that will be determined online based on arguments of "run" + # based on stride, input/output channels, alignment, and conv_kind + self._iterator_algorithm = None + self._stride_support = None + + def _reset_operations(self, reset_epilogue: bool = True): + # Set the default op class + datatype_comb = (self._element_a, self._element_b, self._element_accumulator) + layout_comb = (self._layout_a, self._layout_b) + + self.possible_op_classes = self.options.supporting_opclasses( + self._element_a, self._element_b, self._element_accumulator, + self._layout_a, self._layout_b + ) + + if cutlass.OpcodeClass.TensorOp in self.possible_op_classes: + self.opclass = cutlass.OpcodeClass.TensorOp + elif cutlass.OpcodeClass.Simt in self.possible_op_classes: + self.opclass = cutlass.OpcodeClass.Simt + else: + raise Exception(f'No kernel configuration found for supported data type and layout ' + f'combination {datatype_comb}x{layout_comb}') + + if reset_epilogue: + self._reset_epilogue_functor_activation(epilogue.identity) + + self.alignment_pref_A = min( + 128 // cutlass.DataTypeSize[self._element_a], max(self.possible_operations.alignments)) + self.alignment_pref_B = min( + 128 // cutlass.DataTypeSize[self._element_b], max(self.possible_operations.alignments)) + self.alignment_pref_C = min( + 128 // cutlass.DataTypeSize[self._element_c], max(self.possible_operations.alignments)) + + # + # Tile description Related + # + + 
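+    # Illustrative sketch (not part of the upstream CUTLASS source): one way a caller might
+    # inspect and override the tile description exposed by the property below, assuming
+    # A, B, C, and D are existing CUDA tensors and that the shape values shown are
+    # placeholders chosen for illustration only.
+    #
+    #   plan = cutlass.op.Conv2d(kind="fprop", element=torch.float32)
+    #   print(plan.tile_descriptions())        # list the valid TileDescription choices
+    #   plan.tile_description = {
+    #       "threadblock_shape": [128, 128, 32],
+    #       "warp_count": [2, 2, 1],
+    #       "stages": 3,
+    #   }
+    #   plan.run(A, B, C, D, stride=(1, 1), padding=(1, 1), dilation=(1, 1))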
@property + def tile_description(self) -> TileDescription: + """ + Returns the tile description + """ + return self._tile_description + + @tile_description.setter + def tile_description( + self, td=None): + """ + Set the tile description + + :param td: tile description + :type td: cutlass.backend.TileDescription, or a dict with keys + { + "threadblock_shape": [int, int, int], + "warp_count": [int, int, int], + "stages": int, + "instruction_shape": [int, int, int] (optional), + "cluster_shape": [int, int, int] (optional) + } + """ + if td is None: + return + if isinstance(td, dict): + if self._tile_description is None: + alignment = list(self.possible_operations.kernels_by_alignment.keys())[0] + op = self.possible_operations.operations(alignment)[0] + self._tile_description = datatypes.td_from_profiler_op(op) + if "cluster_shape" in td.keys(): + if td["cluster_shape"] != [1, 1, 1]: + cutlass.logger.warning("Conv2d currently only support 'cluster_shape'=[1, 1, 1]'.") + td["cluster_shape"] = [1, 1, 1] + td = self._tile_description.clone_and_update(td) + + valid, msg = self._valid_tile_description(td) + if valid: + self._tile_description = td + else: + raise Exception(msg) + + def _valid_tile_description(self, td: TileDescription) -> tuple: + """ + Checks whether the provided tile description is valid for the given compute capability. At present, + this checks the following: + + - Does the tile description use a number of stages supported by the compute capability in question? + - Does the tile size requested fit within shared memory? + - Are cluster dimensions outside the valid range requested for a given architecture (e.g., + more non-unit cluster dimensions for pre-SM90 architectures)? + - Is the kernel schedule being used supported on the architecture in question? + + :param td: tile description to validate + :type td: cutlass.backend.TileDescription + :return: tuple in which the first element is a bool indicating that the tile description is valid + and the second element is a string providing an optional error message. 
+ :rtype: tuple + """ + valid, msg = check.valid_stage_count(self.cc, self.current_cc, td) + if not valid: + return (valid, msg) + + valid, msg = check.valid_cluster_shape(self.current_cc, td.cluster_shape) + if not valid: + return (valid, msg) + + return valid, msg + + def tile_descriptions(self) -> list: + """ + Returns a list of valid tile descriptions for the operations + + :returns: list of valid tile descriptions for the operations + :rtype: list + """ + descriptions = [] + description_str = [] + for op in self.possible_operations.all_operations: + td = datatypes.td_from_profiler_op(op) + if str(td) not in description_str: + description_str.append(str(td)) + descriptions.append(td) + return descriptions + + # + # Swizzling functor Related + # + + @property + def swizzling_stride(self): + """ + Returns the stride of swizzling currently being used by the Conv2d + + :return: swizzing stride + """ + return self._swizzling_stride + + @swizzling_stride.setter + def swizzling_stride(self, stride: int): + """ + Sets the swizzling functor to the type specified by `swizzling_functor` + """ + if not isinstance(stride, int): + raise Exception(f"Expect integer (1, 2, 4, 8), got {stride}") + self._swizzling_stride = stride + + def _propose_swizzling_functor(self, stride): + """ + Automatically propose the swizzling functor based on the stride + """ + if self.conv_kind == ConvKind.Dgrad: + if stride[0] != 1 or stride[1] != 1: + return getattr(cutlass.swizzle, f"StridedDgradIdentitySwizzle{self._swizzling_stride}") + + return getattr(cutlass.swizzle, f"IdentitySwizzle{self._swizzling_stride}") + + # + # Iterator Algorithm Related + # + + @property + def iterator_algorithm(self) -> IteratorAlgorithm: + """ + Returns the iterator algorithm + """ + return self._iterator_algorithm + + @iterator_algorithm.setter + def iterator_algorithm(self, alg: str): + """ + Sets the iterator algorithm + + :param alg: The iterator algorithm + :type td: string, options: "analytic", "optimized", "few_channels", and "fixed_channels" + """ + iterator_alg = datatypes.getattr_enum(IteratorAlgorithm, alg) + + # Check if the iterator algorithm is valid + if iterator_alg in [IteratorAlgorithm.FewChannels, IteratorAlgorithm.FixedChannels] and self.conv_kind != ConvKind.Fprop: + raise Exception(f"{self.conv_kind} does not support iterator algorithm {alg}.") + + self._iterator_algorithm = iterator_alg + + def _propose_iterator_algorithm(self, problem_size, alignment_a, alignment_b) -> IteratorAlgorithm: + """ + Propose a valid iterator algorithm based on problem size and alignment + """ + if self.conv_kind == ConvKind.Fprop: + # Check whether the fixed channel is applicable + if problem_size.C == alignment_a: + return IteratorAlgorithm.FixedChannels + elif (problem_size.C % alignment_a == 0 and + problem_size.R <= 32 and problem_size.S <= 32): + return IteratorAlgorithm.Optimized + else: + return IteratorAlgorithm.Analytic + elif self.conv_kind == ConvKind.Dgrad: + if (problem_size.K % alignment_a == 0 and + problem_size.R <= 32 and problem_size.S <= 32 and + problem_size.C % alignment_b == 0): + return IteratorAlgorithm.Optimized + else: + return IteratorAlgorithm.Analytic + elif self.conv_kind == ConvKind.Wgrad: + if (problem_size.K % alignment_a == 0 and + problem_size.C % alignment_b == 0): + return IteratorAlgorithm.Optimized + else: + return IteratorAlgorithm.Analytic + + def _validate_iterator_algorithm(self, iterator_algorithm, problem_size, alignment_a, alignment_b) -> bool: + """ + Validate whether the user provide 
iterator algorithm works for the given problem size + """ + if self.conv_kind == ConvKind.Fprop: + if iterator_algorithm == IteratorAlgorithm.FixedChannels: + return problem_size.C == alignment_a + elif iterator_algorithm == IteratorAlgorithm.Optimized: + return (problem_size.C % alignment_a == 0 and + problem_size.R <= 32 and problem_size.S <= 32) + elif iterator_algorithm == IteratorAlgorithm.FewChannels: + return problem_size.C % alignment_a == 0 + elif self.conv_kind == ConvKind.Dgrad: + if iterator_algorithm == IteratorAlgorithm.Optimized: + return (problem_size.K % alignment_a == 0 and + problem_size.R <= 32 and problem_size.S <= 32 and + problem_size.C % alignment_b == 0) + elif self.conv_kind == ConvKind.Wgrad: + if iterator_algorithm == IteratorAlgorithm.Optimized: + return (problem_size.K % alignment_a == 0 and + problem_size.C % alignment_b == 0) + + return True + + # + # Stride Support Related + # + + def _propose_stride_support(self, stride): + if self.conv_kind == ConvKind.Dgrad: + if stride[0] == 1 and stride[1] == 1: + return StrideSupport.Unity + + return StrideSupport.Strided + + # + # Construct and Compilation + # + + def construct( + self, tile_description: TileDescription = None, + alignment_A: int = None, alignment_B: int = None, alignment_C: int = None, + iterator_algorithm: IteratorAlgorithm = None, + stride_support = None, swizzling_functor: cutlass.swizzle = None, + epilogue_functor=None) -> cutlass.backend.Conv2dOperation: + """ + Constructs a ``cutlass.backend.Conv2dOperation`` based on the input parameters and current + kernel specification of the ``Conv2d`` object. + + :param tile_description: tile description specifying shapes and operand types to use in the kernel + :type tile_description: cutlass.backend.TileDescription + :param alignment_A: alignment of operand A + :type alignment_A: int + :param alignment_B: alignment of operand B + :type alignment_B: int + :param alignment_C: alignment of operand C + :type alignment_C: int + :param iterator_algorithm: the iterator algorithm used + :type iterator_algorithm: cutlass_library.library.IteratorAlgorithm + :param stride_support: the stride support of dgrad + :type stride_support: cutlass_library.library.StrideSupport + :param swizzling_functor: the swizzling functor + :type swizzling_functor: cutlass.swizzle + :param epilogue_functor: the epilogue functor + + :return: operation that was constructed + :rtype: cutlass.backend.Conv2dOperation + """ + # Get alignment + alignment_A = check.alignment_or_default(alignment_A, self.alignment_pref_A) + alignment_B = check.alignment_or_default(alignment_B, self.alignment_pref_B) + alignment_C = check.alignment_or_default(alignment_C, self.alignment_pref_C) + + tensor_A = TensorDescription(self._element_a, self._layout_a, alignment_A) + tensor_B = TensorDescription(self._element_b, self._layout_b, alignment_B) + tensor_C = TensorDescription(self._element_c, self._layout_c, alignment_C) + + if tile_description is None: + if self.tile_description is not None: + tile_description = self.tile_description + else: + min_alignment = min([alignment_A, alignment_B, alignment_C]) + op = self.possible_operations.operations(min_alignment)[0] + tile_description = datatypes.td_from_profiler_op(op) + else: + valid, err_str = self._valid_tile_description(tile_description) + if not valid: + raise Exception(f"Invalid tile description.
{err_str}") + self.tile_description = tile_description + + if iterator_algorithm is None: + # If the iterator algorithm is already set + if self.iterator_algorithm is not None: + iterator_algorithm = self.iterator_algorithm + else: + # Otherwise, we conservatively use the analytic iterator for correctness + iterator_algorithm = IteratorAlgorithm.Analytic + + if stride_support is None: + # If the stride support is already set + if self._stride_support is not None: + stride_support = self._stride_support + else: + # Otherwise, we assume strided + stride_support = StrideSupport.Strided + + if swizzling_functor is None: + # If the swizzling functor is already set + swizzling_functor = self._propose_swizzling_functor(stride=(2, 2)) + + if epilogue_functor is None: + if self.epilogue_functor is not None: + epilogue_functor = self.epilogue_functor + else: + epilogue_functor = self._create_epilogue_functor_activation(self._activation) + + # Reset the alignment of the epilogue functor + epilogue_functor = self._reset_epilogue_functor_alignment(alignment_C, epilogue_functor) + + operation = Conv2dOperation( + conv_kind=self.conv_kind, + iterator_algorithm=iterator_algorithm, + arch=self.current_cc, + tile_description=tile_description, + A=tensor_A, B=tensor_B, C=tensor_C, + stride_support=stride_support, + epilogue_functor=epilogue_functor, + swizzling_functor=swizzling_functor, + ) + + return operation + + def compile(self, tile_description: TileDescription = None, + alignment_A: int = None, alignment_B: int = None, alignment_C: int = None, + iterator_algorithm: IteratorAlgorithm = None, + stride_support = None, swizzling_functor: cutlass.swizzle = None, + epilogue_functor = None, print_module: bool = False) -> cutlass.backend.Conv2dOperation: + """ + Emits and compiles the kernel currently specified. If ``tile_description`` and any + of the ``alignment`` parameters are set, the kernel will be chosen using this + tile description and alignments. Otherwise, a default tile description and alignment + will be used. + + ::param tile_description: tile description specifying shapes and operand types to use in the kernel + :type tile_description: cutlass.backend.TileDescription + :param alignment_A: alignment of operand A + :type alignment_A: int + :param alignment_B: alignment of operand B + :type alignment_B: int + :param alignment_C: alignment of operand C + :type alignment_C: int + :param iterator_algorithm: the iterator algorithm used + :type iterator_algorithm: cutlass_library.library.IteratorAlgorithm + :param stride_support: the stride support of dgrad + :type stride_support: cutlass_library.library.StrideSupport + :param swizzling_functor: the swizzling functor + :type swizzling_functor: cutlass.swizzle + :param epilogue_functor: the epilogue functor + + :return: operation that was compiled + :rtype: cutlass.backend.Conv2dOperation + """ + + self.operation = self.construct( + tile_description, alignment_A, alignment_B, alignment_C, + iterator_algorithm, stride_support, swizzling_functor, epilogue_functor) + + if print_module: + print(self.operation.rt_module.emit()) + + compiler.add_module([self.operation,]) + return self.operation + + # + # Run Related + # + + def _verify_type_and_layout(self, tensor, ref_type, ref_layout, name): + """ + Verifies that ``tensor`` has data type ``ref_type`` and layout ``ref_layout``. An exception + is raised if it does not. 
+ + :param tensor: object representing a tensor passed in to verify, or ``None`` if no tensor was passed in + :type tensor: numpy/cupy/torch array/tensor object + :param ref_dtype: data type for the tensor that this object was initialized to + :param name: identifier of the tensor to verify. Used in raising exceptions + :type name: str + """ + dtype, _ = datatypes.get_datatype_and_layout(tensor) + if dtype != ref_type: + raise Exception(f'Tensor {name} with type and layout {dtype} ' + f'does not match the expected type of {ref_type}.') + + def _get_and_verify_conv_problem_size(self, A, B, C, stride, padding, dilation): + if self.conv_kind == ConvKind.Fprop: + input = A + weight = B + output = C + output_tensor = "C" + elif self.conv_kind == ConvKind.Dgrad: + output = A + weight = B + input = C + output_tensor = "A" + elif self.conv_kind == ConvKind.Wgrad: + output = A + input = B + weight = C + output_tensor = "A" + else: + raise Exception(f"Convolution kind {self.conv_kind} is not supported") + + N_, H_, W_, C_ = datatypes.get_tensor_shape(input, op="CONV") + K_, R_, S_, _ = datatypes.get_tensor_shape(weight, op="CONV") + _, P_, Q_, _ = datatypes.get_tensor_shape(output, op="CONV") + + problem_size = Conv2DProblemSize( + N_, H_, W_, C_, + K_, R_, S_, C_, + padding[0], padding[1], + stride[0], stride[1], + dilation[0], dilation[1], + ConvMode.CrossCorrelation, + 1, 1 + ) + + if P_ != problem_size.P or Q_ != problem_size.Q: + raise Exception( + f"Tensor {output_tensor} size should be ({N_}, {problem_size.P}, {problem_size.Q}, {K_}), got ({N_}, {P_}, {Q_}, {K_})") + + return problem_size + + def run(self, A=None, B=None, C=None, D=None, + stride=(1, 1), padding=(0, 0), dilation=(1, 1), + alpha=None, beta=None, + split_k=("serial", 1), sync: bool = True, + print_module: bool = False) -> Conv2dArguments: + """ + Runs the kernel currently specified. If it has not already been, the kernel is emitted and + compiled. Tensors holding operands and outputs of the kernel are sourced either from the + ``A``, ``B``, ``C``, ``D``, ``alpha``, and ``beta`` + parameters provided in the call, or from those + passed in on the construction of this object -- one of the two must be specified. + + By default, this call returns only once the kernel has completed. To launch the kernel + and immediately return, set ``sync=False``. In this case, it is the responsibility of the + caller to syncrhonize the results of the kernel before attempting to access outputs + by calling ``sync()`` on the arguments returned from this call. + + :param A: tensor representing data type and layout of operand A + :param B: tensor representing data type and layout of operand B + :param C: tensor representing data type and layout of operand C + :param D: tensor representing data type and layout of operand D + :param stride: (stride_h, stride_w) describing the convolution stride. Default: (1, 1) + :param padding: (pad_h, pad_w) describing the convolution padding. Default: (0, 0) + :param dilation: (dilation_h, dilation_w) describing the dilation of convolution. 
Default: (1, 1) + :param alpha: scalar paramter alpha from GEMM computation that scales the product of operands A and B + :param beta: scalar parameter beta from GEMM operation that scales operand C + :param split_k: a tuple (split_k_mode, split_k_slices) + :param sync: whether the call should wait for the kernel to complete before returning + :type sync: bool + :param print_module: whether to print the emitted C++ code + :type print_module: bool + + :return: arguments passed in to the kernel + :rtype: cutlass.backend.Conv2dArguments + """ + A = self._verify_tensor(A, self.A, self._element_a, self._layout_a, "A") + B = self._verify_tensor(B, self.B, self._element_b, self._layout_b, "B") + C = self._verify_tensor(C, self.C, self._element_c, self._layout_c, "C") + D = self._verify_tensor(D, self.D, self._element_d, self._layout_d, "D") + alpha = self._verify_scalar(alpha, self.alpha, self._element_c, "alpha") + beta = self._verify_scalar(beta, self.beta, self._element_c, "beta") + + # handle the case when there is no C + if C is None: + if beta != 0: + raise Exception(f"With beta {beta} != 0, C has to be provided.") + else: + C = D + + # Construct problem size based on input + # It also verifies whether the A, B, C, D, stride, padding, and dilation are matching + problem_size = self._get_and_verify_conv_problem_size(A, B, C, stride, padding, dilation) + + # Propose stride support based on input + stride_support = self._propose_stride_support(stride) + + # Propose swizzling functor + swizzling_functor = self._propose_swizzling_functor(stride) + + shape_a = datatypes.get_tensor_shape(A, op="CONV") + shape_b = datatypes.get_tensor_shape(B, op="CONV") + shape_c = datatypes.get_tensor_shape(C, op="CONV") + + # Get the alignment + alignment_a = self.possible_operations.find_alignment(shape_a, self._layout_a) + alignment_b = self.possible_operations.find_alignment(shape_b, self._layout_b) + alignment_c = self.possible_operations.find_alignment(shape_c, self._layout_c) + + alignment_a = check.update_alignment(alignment_a, self.alignment_pref_A) + alignment_b = check.update_alignment(alignment_b, self.alignment_pref_B) + alignment_c = check.update_alignment(alignment_c, self.alignment_pref_C) + + # Propose iterator algorithm based on input + if self._iterator_algorithm is None: + # Propose a default iterator algorithm based on the problem size + iterator_algorithm = self._propose_iterator_algorithm(problem_size, alignment_a, alignment_b) + else: + if (self._validate_iterator_algorithm(self._iterator_algorithm, problem_size, alignment_a, alignment_b)): + iterator_algorithm = self._iterator_algorithm + else: + raise Exception(f"Iterator algorithm {self._iterator_algorithm} is invalid for current problem.") + + epilogue_args = [alpha, beta] + + if hasattr(self, "_activation_args"): + if isinstance(self._activation_args, list): + epilogue_args += self._activation_args + else: + epilogue_args.append(self._activation_args) + + if split_k[0] == "parallel" and split_k[1] > 1: + epilogue_functor = self._create_epilogue_functor_activation(epilogue.identity) + else: + epilogue_functor = self.epilogue_functor + + # The alignment is determined by the iterator function (I believe) + self.compile(tile_description=self.tile_description, alignment_A=alignment_a, alignment_B=alignment_b, + alignment_C=alignment_c, iterator_algorithm=iterator_algorithm, stride_support=stride_support, + swizzling_functor=swizzling_functor, epilogue_functor=epilogue_functor, print_module=print_module) + + # Create reduction operation for 
parallel split-k + if split_k[0] == "parallel" and split_k[1] > 1: + epilogue_functor_reduction = self._reset_epilogue_functor_alignment(alignment_c, self.epilogue_functor) + self.reduction_operation = ReductionOperation( + shape=MatrixCoord(4, 32 * alignment_c), C=self.operation.C, + element_accumulator=self._element_accumulator, + element_compute=self._element_accumulator, + epilogue_functor=epilogue_functor_reduction, + count=alignment_c + ) + if print_module: + print(self.reduction_operation.rt_module.emit()) + compiler.add_module([self.reduction_operation,]) + + arguments = Conv2dArguments( + operation=self.operation, problem_size=problem_size, + A=A, B=B, C=C, D=D, + output_op=self.operation.epilogue_type(*epilogue_args), + split_k_mode=datatypes.getattr_enum(SplitKMode, split_k[0]), + split_k_slices=split_k[1] + ) + + self.operation.run(arguments) + + if split_k[0] == "parallel" and split_k[1] > 1: + implicit_gemm_size = arguments.problem_size.implicit_gemm_size(self.conv_kind) + reduction_arguments = ReductionArguments( + self.reduction_operation, + problem_size=[implicit_gemm_size.m, implicit_gemm_size.n], + partitions=split_k[1], + workspace=arguments.ptr_D, + destination=D, + source=C, + output_op=self.reduction_operation.epilogue_type(*epilogue_args) + ) + self.reduction_operation.run(reduction_arguments) + + if sync: + if split_k[0] == "parallel" and split_k[1] > 1: + reduction_arguments.sync() + else: + arguments.sync() + + return arguments + + # + # Helper functions + # + @staticmethod + def output_size(input_size, weight_size, padding, stride, dilation): + problem_size = Conv2DProblemSize( + *input_size, + *weight_size, + padding[0], padding[1], + stride[0], stride[1], + dilation[0], dilation[1], + ConvMode.CrossCorrelation, + 1, 1 + ) + return (problem_size.N, problem_size.P, problem_size.Q, problem_size.K) + + +# +# Easy to use interfaces for fprop, wgrad, and dgrad +# + +class Conv2dFprop(Conv2d): + def __init__( + self, + input=None, weight=None, C=None, output=None, alpha=1, beta=0, + element=None, + element_input=None, element_weight=None, element_C=None, element_output=None, + element_accumulator=None, + cc: int = None, kernel_cc: int = None): + A, B, D = input, weight, output + element_A, element_B, element_D = element_input, element_weight, element_output + super().__init__( + "fprop", A, B, C, D, alpha, beta, element, + element_A, element_B, element_C, element_D, + element_accumulator, cc, kernel_cc) + + def run( + self, input=None, weight=None, C=None, output=None, alpha=None, beta=None, + stride=(1, 1), padding=(0, 0), dilation=(1, 1), split_k=("serial", 1), + sync: bool = True, print_module: bool = False) -> Conv2dArguments: + + A, B, D = input, weight, output + return super().run( + A, B, C, D, alpha, beta, stride, padding, dilation, split_k, sync, print_module) + + +class Conv2dDgrad(Conv2d): + def __init__( + self, + grad_output=None, weight=None, C=None, grad_input=None, alpha=1, beta=0, + element=None, + element_grad_output=None, element_weight=None, element_C=None, element_grad_input=None, + element_accumulator=None, + cc: int = None, kernel_cc: int = None): + A, B, D = grad_output, weight, grad_input + element_A, element_B, element_D = element_grad_output, element_weight, element_grad_input + super().__init__( + "dgrad", A, B, C, D, alpha, beta, element, + element_A, element_B, element_C, element_D, + element_accumulator, cc, kernel_cc) + + def run(self, grad_output=None, weight=None, C=None, grad_input=None, alpha=None, beta=None, + stride=(1, 1), 
padding=(0, 0), dilation=(1, 1), split_k=("serial", 1), + sync: bool = True, print_module: bool = False) -> Conv2dArguments: + # + A, B, D = grad_output, weight, grad_input + return super().run( + A, B, C, D, alpha, beta, stride, padding, dilation, split_k, sync, print_module) + + +class Conv2dWgrad(Conv2d): + def __init__( + self, + grad_output=None, input=None, C=None, grad_weight=None, alpha=1, beta=0, + element=None, + element_grad_output=None, element_input=None, element_C=None, element_grad_weight=None, + element_accumulator=None, + cc: int = None, kernel_cc: int = None): + A, B, D = grad_output, input, grad_weight + element_A, element_B, element_D = element_grad_output, element_input, element_grad_weight + super().__init__( + "wgrad", A, B, C, D, alpha, beta, element, + element_A, element_B, element_C, element_D, + element_accumulator, cc, kernel_cc) + + def run(self, grad_output=None, input=None, C=None, grad_weight=None, alpha=None, beta=None, + stride=(1, 1), padding=(0, 0), dilation=(1, 1), split_k=("serial", 1), + sync: bool = True, print_module: bool = False) -> Conv2dArguments: + # + A, B, D = grad_output, input, grad_weight + return super().run( + A, B, C, D, alpha, beta, stride, padding, dilation, split_k, sync, print_module) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/op/gemm.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/op/gemm.py new file mode 100644 index 0000000000000000000000000000000000000000..3046e34dbbe3af8f787ca563778ffdbf26644184 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/op/gemm.py @@ -0,0 +1,679 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +################################################################################################# + +""" + Ease-of-use interface for constructing, compiling, and running GEMMs. + + The ``Gemm`` interface is meant to allow one to easily instantiate, compile, and run + GEMM operations in CUTLASS via Python, without specifying many configuration parameters. + Under the hood, the interface will select sensible default parameters for the many template + parameters for CUTLASS GEMMs. + + Note: optimal performance is not to be expected from this interface. To achieve optimal + performance, one should specify and tune each configuration parameter. + + The simplest example of using this interface is the following: + + .. highlight:: python + .. code-block:: python + + # A, B, C, and D are torch/numpy/cupy tensor objects + plan = cutlass.op.Gemm(A, B, C, D) + plan.run() + + + One can also use the interface by specifying data types of operands at construction + and using different tensor objects with these data types at runtime: + + .. highlight:: python + .. code-block:: python + + # The following is shorthand for: + # cutlass.op.Gemm(element_A=torch.float32, element_B=torch.float32, + # element_C=torch.float32, element_D=torch.float32, + # element_accumulator=torch.float32, + # layout=cutlass.LayoutType.RowMajor) + plan = cutlass.op.Gemm(element=torch.float32, layout=cutlass.LayoutType.RowMajor) + + A0 = torch.rand((128, 256), device='cuda') + B0 = torch.rand((256, 64), device='cuda') + C0 = torch.zeros((128, 64), device='cuda') + D0 = torch.zeros((128, 64), device='cuda') + plan.run(A0, B0, C0, D0) + + A1 = torch.rand((32, 128), device='cuda') + B1 = torch.rand((128, 256), device='cuda') + C1 = torch.zeros((32, 256), device='cuda') + D1 = torch.zeros((32, 256), device='cuda') + plan.run(A1, B1, C1, D1) + + The interface additionally enables one to decouple the compilation of the underlying CUTLASS + kernel from its execution: + + .. highlight:: python + .. code-block:: python + + plan = cutlass.op.Gemm(element=np.float32, layout=cutlass.LayoutType.RowMajor) + plan.compile() + + # Do other work... + + plan.run(A0, B0, C0, D0) + + # Do other work... + + plan.run(A1, B1, C1, D1) + + Elementwise activation functions are easily fused to the GEMM via the interface: + + .. highlight:: python + .. code-block:: python + + plan = cutlass.op.Gemm(element=np.float32, layout=cutlass.LayoutType.RowMajor) + plan.activation = cutlass.epilogue.relu + + Operations can also be run asynchronously: + + .. highlight:: python + .. code-block:: python + + plan = cutlass.op.Gemm(element=np.float32, layout=cutlass.LayoutType.RowMajor) + args = plan.run() + + # Do other work... + + args.sync() +""" + +from math import prod + +import cutlass +from cutlass import ( + epilogue, + swizzle, + GemmUniversalMode, +) +from cutlass.backend import compiler +from cutlass.backend.evt import EpilogueFunctorVisitor +from cutlass.backend.gemm_operation import GemmArguments, GemmOperationUniversal +from cutlass.backend.library import TensorDescription, TileDescription +from cutlass.op.op import OperationBase +from cutlass.shape import GemmCoord +from cutlass.utils import check, datatypes + + +class Gemm(OperationBase): + """ + Constructs a ``Gemm`` object. + + The data types and layouts of operands A, B, and C, along with the data type of output D + and that used for accumulation, are bound to the ``Gemm`` object throughout its lifetime -- + these are not to be changed after a ``Gemm`` has been constructed.
+ + The constructor has optional parameters for flexibly setting these parameters. The following + constructors are equivalent: + + .. highlight:: python + .. code-block:: python + + # Use F32 for A, B, C, D, and accumulation. All operands are row major. + + # Use the generic ``element`` and ``layout`` parameters to concisely set all data types and layouts + # for operands to the same values. + Gemm(element=cutlass.DataType.f32, layout=cutlass.LayoutType.RowMajor) + + # Explicitly specify the data types to use for A, B, C, and D. Use the generic ``layout``. + Gemm(element_A=cutlass.DataType.f32, element_B=cutlass.DataType.f32, element_C=cutlass.DataType.f32, + element_D=cutlass.DataType.f32, layout=cutlass.LayoutType.RowMajor) + + # Set the data types and elements from existing tensors. Note that one can use different tensors when + # executing GEMM via the ``run()`` method than passed in here (though those passed in to ``run()`` must + # have the same data type and layout as those passed in here). + # A, B, C, and D are row-major torch.Tensor objects of type torch.float32 + Gemm(A=A, B=B, C=C, D=D) + + # Use the generic ``element`` and explicitly specify the layouts to use for A, B, and C (layout of D is + # the same as that for D, at present) + Gemm(element=cutlass.DataType.f32, layout_A=cutlass.LayoutType.RowMajor, + layout_B=cutlass.LayoutType.RowMajor, layout_C=cutlass.LayoutType.RowMajor) + + # Explicitly specify the data type and layout for only some of A, B, C, and D. Unspecified data types + # and layouts will inherit those passed in via the generic ``element`` and ``layout`` + Gemm(element_A=cutlass.DataType.f32, layout_B=cutlass.LayoutType.RowMajor, + element=cutlass.DataType.f32, layout=cutlass.LayoutType.RowMajor) + + The order of precedence for the setting of the data type and layout for a given operand/output is as follows: + 1) If the tensor type is specified (e.g., ``A``), use the data type and layout inferred from this tensor + 2) Otherwise, if the data type/layout (e.g., ``element_A``, ``layout_A``) is specified, use those + 3) Otherwise, use the generic values (e.g., ``element``, ``layout``) + + :param cc: compute capability of device for which kernels should be compiled. For example, if running on H100, this should be set to 90 + :type cc: int + :param kernel_cc: compute capability of kernels to generate. 
For example, if running on SM90, but desiring to use a CUTLASS 2.x-style Ampere kernel, this should be set to 80 + :type kernel_cc: int + :param A: tensor representing data type and layout of operand A + :param B: tensor representing data type and layout of operand B + :param C: tensor representing data type and layout of operand C + :param D: tensor representing data type and layout of operand D + :param alpha: scalar paramter alpha from GEMM computation that scales the product of operands A and B + :param beta: scalar parameter beta from GEMM operation that scales operand C + :param element_accumulator: data type to be used in accumulation of the product of operands A and B + :type element_accumulator: cutlass.DataType + :param element: generic data type to be used for operands A, B, C, D, as well as the accumulation data type + :type element: cutlass.DataType + :param layout: generic layout type to be used for operands A, B, C, and D + :type layout: cutlass.LayoutType + :param element_A: data type to be used for operand A + :type element_A: cutlass.DataType + :param element_B: data type to be used for operand B + :type element_B: cutlass.DataType + :param element_C: data type to be used for operand C + :type element_C: cutlass.DataType + :param element_D: data type to be used for operand D + :type element_D: cutlass.DataType + :type layout_A: layout of operand A + :param layout_A: cutlass.LayoutType + :type layout_B: layout of operand B + :param layout_B: cutlass.LayoutType + :type layout_C: layout of operand C + :param layout_C: cutlass.LayoutType + :type layout_D: layout of operand D + :param layout_D: cutlass.LayoutType + """ + + def __init__( + self, A=None, B=None, C=None, D=None, + alpha=1.0, beta=0.0, element_accumulator=None, + element=None, layout=None, + element_A=None, element_B=None, element_C=None, element_D=None, + layout_A=None, layout_B=None, layout_C=None, + cc: int = None, kernel_cc: int = None + ): + super().__init__(cc=cc, kernel_cc=kernel_cc) + self.name = "gemm" + self.compiled = False + + elements = [] + layouts = [] + + # Check that at least one of the following is set for each tensor (illustrated assuming tensor A): + # ``A``, ``element_A``, ``element`` and ``A``, ``layout_A``, ``layout`` + for elt, lay, tens, name in zip([element_A, element_B, element_C, element_D], + [layout_A, layout_B, layout_C, layout_C], + [A, B, C, D], + ["A", "B", "C", "D"]): + if elt is not None and tens is not None: + raise Exception(f'Must not specify both element_{name} and tensor {name}') + if lay is not None and tens is not None: + raise Exception(f'Must not specify both layout_{name} and tensor {name}') + if elt is None and tens is None and element is None: + raise Exception(f'Must specify one of element_{name}, tensor {name}, or generic element.') + if lay is None and tens is None and layout is None: + raise Exception(f'Must specify one of layout_{name}, tensor {name}, or generic layout.') + + elt_to_set = None + lay_to_set = None + if tens is not None: + elt_to_set, lay_to_set = datatypes.get_datatype_and_layout(tens) + else: + elt_to_set = elt if elt is not None else element + lay_to_set = lay if lay is not None else layout + + elements.append(datatypes.library_type(elt_to_set)) + layouts.append(lay_to_set) + + self._element_a, self._element_b, self._element_c, self._element_d = elements + self._layout_a, self._layout_b, self._layout_c, self._layout_d = layouts + + if element_accumulator is None: + self._element_accumulator = self._element_c + else: + self._element_accumulator 
= datatypes.library_type(element_accumulator) + + self.A = A + self.B = B + self.C = C + self.D = D + + self.alpha = alpha + self.beta = beta + + self.epilogue_functor = None + self.op_class = None + self._tile_description = None + + self._reset_operations() + + self._swizzling_functor = cutlass.swizzle.IdentitySwizzle1 + + def _reset_operations(self, reset_epilogue: bool = True): + # Set the default op class + datatype_comb = (self._element_a, self._element_b, self._element_accumulator) + layout_comb = (self._layout_a, self._layout_b) + self.possible_op_classes = self.options.supporting_opclasses( + self._element_a, self._element_b, self._element_accumulator, + self._layout_a, self._layout_b) + + if cutlass.OpcodeClass.TensorOp in self.possible_op_classes: + self.opclass = cutlass.OpcodeClass.TensorOp + elif cutlass.OpcodeClass.Simt in self.possible_op_classes: + self.opclass = cutlass.OpcodeClass.Simt + else: + raise Exception(f'No kernel configuration found for supported data type and layout ' + f'combination {datatype_comb}x{layout_comb}') + + if reset_epilogue: + self._reset_epilogue_functor_activation(epilogue.identity) + + @property + def swizzling_functor(self): + """ + Returns the type of the swizzling functor currently being used by the GEMM + + :return: swizzing functor type + """ + return self._swizzling_functor + + @swizzling_functor.setter + def swizzling_functor(self, swizzling_functor): + """ + Sets the swizzling functor to the type specified by `swizzling_functor` + """ + if swizzling_functor == swizzle.ThreadblockSwizzleStreamK: + if self.op_class == cutlass.OpcodeClass.Simt: + raise Exception('ThreadblockSwizzleStreamK is currently only supported with opcode class TensorOp') + + if self.current_cc == 90: + raise Exception('ThreadblockSwizzleStreamK is currently unsupported on SM90') + self._swizzling_functor = swizzling_functor + + # + # Tile description Related + # + + @property + def tile_description(self) -> TileDescription: + """ + Returns the tile description + """ + return self._tile_description + + @tile_description.setter + def tile_description( + self, td=None): + """ + Set the tile description + + :param td: tile description + :type td: cutlass.backend.TileDescription, or a dict with keys + { + "threadblock_shape": [int, int, int], + "warp_count": [int, int, int], + "stages": int, + "instruction_shape": [int, int, int] (optional), + "cluster_shape": [int, int, int] (optional) + } + """ + if td is None: + return + if isinstance(td, dict): + if self._tile_description is None: + alignment = list(self.possible_operations.kernels_by_alignment.keys())[0] + op = self.possible_operations.operations(alignment)[0] + self._tile_description = datatypes.td_from_profiler_op(op) + td = self._tile_description.clone_and_update(td) + + valid, msg = self._valid_tile_description(td) + if valid: + self._tile_description = td + else: + raise Exception(msg) + + def _valid_tile_description(self, td: TileDescription) -> tuple: + """ + Checks whether the provided tile description is valid for the given compute capability. At present, + this checks the following: + + - Does the tile description use a number of stages supported by the compute capability in question? + - Does the tile size requested fit within shared memory? + - Are cluster dimensions outside the valid range requested for a given architecture (e.g., + more non-unit cluster dimensions for pre-SM90 architectures)? + - Is the kernel schedule being used supported on the architecture in question? 
+ + :param td: tile description to validate + :type td: cutlass.backend.TileDescription + :return: tuple in which the first element is a bool indicating that the tile description is valid + and the second element is a string providing an optional error message. + :rtype: tuple + """ + valid, msg = check.valid_stage_count(self.cc, self.current_cc, td, self._element_c, self._element_d) + if not valid: + return (valid, msg) + + valid, msg = check.valid_cluster_shape(self.current_cc, td.cluster_shape) + if not valid: + return (valid, msg) + + valid, msg = check.valid_schedule(self.current_cc, td.kernel_schedule, td.epilogue_schedule, td.tile_scheduler) + return valid, msg + + def tile_descriptions(self) -> list: + """ + Returns a list of valid tile descriptions for the operations + + :returns: list of valid tile descriptions for the operations + :rtype: list + """ + return [datatypes.td_from_profiler_op(op) for op in self.possible_operations.all_operations] + + def construct( + self, tile_description: TileDescription = None, + alignment_A: int = None, alignment_B: int = None, alignment_C: int = None) -> GemmOperationUniversal: + """ + Constructs a ``cutlass.backend.GemmUniversalOperation`` based on the input parameters and current + kernel specification of the ``Gemm`` object. + + :param tile_description: tile description specifying shapes and operand types to use in the kernel + :type tile_description: cutlass.backend.TileDescription + :param alignment_A: alignment of operand A + :type alignment_A: int + :param alignment_B: alignment of operand B + :type alignment_B: int + :param alignment_C: alignment of operand C + :type alignment_C: int + + :return: operation that was constructed + :rtype: cutlass.backend.GemmOperationUniversal + """ + alignment_pref_A = min(128 // cutlass.DataTypeSize[self._element_a], max(self.possible_operations.alignments)) + alignment_pref_B = min(128 // cutlass.DataTypeSize[self._element_b], max(self.possible_operations.alignments)) + alignment_pref_C = min(128 // cutlass.DataTypeSize[self._element_c], max(self.possible_operations.alignments)) + alignment_A = check.alignment_or_default(alignment_A, alignment_pref_A) + alignment_B = check.alignment_or_default(alignment_B, alignment_pref_B) + alignment_C = check.alignment_or_default(alignment_C, alignment_pref_C) + + self.epilogue_functor = self._reset_epilogue_functor_alignment(alignment_C, self.epilogue_functor) + + tensor_A = TensorDescription(self._element_a, self._layout_a, alignment_A) + tensor_B = TensorDescription(self._element_b, self._layout_b, alignment_B) + tensor_C = TensorDescription(self._element_c, self._layout_c, alignment_C) + + if tile_description is None: + if self._tile_description is None: + op = self.possible_operations.operations(alignment_A)[0] + tile_description = datatypes.td_from_profiler_op(op) + else: + tile_description = self._tile_description + else: + valid, err_str = self._valid_tile_description(tile_description) + if not valid: + raise Exception(f"Invalid tile description. 
{err_str}") + self._tile_description = tile_description + + operation = GemmOperationUniversal( + arch=self.current_cc, + tile_description=tile_description, + A=tensor_A, B=tensor_B, C=tensor_C, + epilogue_functor=self.epilogue_functor, + swizzling_functor=self._swizzling_functor, + ) + + return operation + + def compile(self, tile_description: TileDescription = None, + alignment_A: int = None, alignment_B: int = None, alignment_C: int = None, + print_module: bool = False) -> cutlass.backend.GemmOperationUniversal: + """ + Emits and compiles the kernel currently specified. If ``tile_description`` and any + of the ``alignment`` parameters are set, the kernel will be chosen using this + tile description and alignments. Otherwise, a default tile description and alignment + will be used. + + :param tile_description: tile description specifying shapes and operand types to use in the kernel + :type tile_description: cutlass.backend.TileDescription + :param alignment_A: alignment of operand A + :type alignment_A: int + :param alignment_B: alignment of operand B + :type alignment_B: int + :param alignment_C: alignment of operand C + :type alignment_C: int + :param print_module: whether to print the emitted C++ code + :type print_module: bool + + :return: operation that was compiled + :rtype: cutlass.backend.GemmOperationUniversal + """ + self.operation = self.construct(tile_description, alignment_A, alignment_B, alignment_C) + + if print_module: + print(self.operation.rt_module.emit()) + + compiler.add_module([self.operation,]) + return self.operation + + def _verify_rank(self, tensor): + """ + Verifies that ``tensor`` has rank greater than 1 + + :param tensor: object representing a tensor passed in to verify, or ``None`` if no tensor was passed in + :type tensor: numpy/cupy/torch array/tensor object + """ + if len(tensor.shape) < 2: + raise Exception(f"Tensors must be of rank greater than 1. Received tensor of shape: {tensor.shape}") + + def _get_batch_count(self, A, B, C, D) -> int: + """ + Returns the batch count specified by the tensors A, B, C, and D and verifies that these + tensors match in batch size. Presence of a batch dimension is detected by one of the + tensors being rank 3. If a batch dimension is present, it must be present in one of + operands A, B, or C (but need not be in all), and must be present in D. + + :param A: tensor A + :type A: numpy/cupy/torch array/tensor object + :param B: tensor B + :type B: numpy/cupy/torch array/tensor object + :param C: tensor C + :type C: numpy/cupy/torch array/tensor object + :param D: tensor D + :type D: numpy/cupy/torch array/tensor object + + :return: tuple of batch count dimensions + :rtype: tuple + """ + A_batch = prod(A.shape[:-2]) if len(A.shape) > 2 else 1 + B_batch = prod(B.shape[:-2]) if len(B.shape) > 2 else 1 + + if 1 not in [A_batch, B_batch]: + if A_batch != B_batch: + raise Exception(f"Get invalid batch counts: A={A_batch}, B={B_batch}") + return max(A_batch, B_batch) + + def _get_batch_stride(self, tensor) -> int: + """ + Returns the batch stride of ``tensor``. If ``tensor`` is only rank-2, batch stride is 0. + + :param tensor: tensor object to process + :type tensor: numpy/cupy/torch array/tensor object + + :return: stride between each matrix in the batch + :rtype: int + """ + if len(tensor.shape) > 2: + return tensor.shape[-2] * tensor.shape[-1] + else: + return 0 + + def _get_problem_args(self, A, B, C, D) -> tuple: + """ + Returns the problem size and GEMM universal mode to use for the + given operands. 
+ + :param A: tensor A + :type A: numpy/cupy/torch array/tensor object + :param B: tensor B + :type B: numpy/cupy/torch array/tensor object + :param C: tensor C + :type C: numpy/cupy/torch array/tensor object + :param D: tensor D + :type D: numpy/cupy/torch array/tensor object + + :return: tuple containing the problem size (cutlass.shape.GemmCoord), the GEMM mode (cutlass.GemmUniversalMode), and the batch count (int) + :rtype: tuple + """ + M, K = A.shape[-2:] + N = B.shape[-1] + mode = GemmUniversalMode.Gemm + + batch_count = self._get_batch_count(A, B, C, D) + returned_batch_count = batch_count + + # If we are running a batched GEMM in which there is a nonzero batch stride + # only for A, then we can fold the batched dimension of A into the M dimension + # (i.e., (b, m, k) x (k, n) -> (m*b, k) x (k, n)). This works only if both A + # and C are row major. A similar operation can be performed if only B has a nonzero + # batch dimension + if batch_count > 1: + A_row = self._layout_a == cutlass.LayoutType.RowMajor + B_row = self._layout_b == cutlass.LayoutType.RowMajor + C_row = self._layout_c == cutlass.LayoutType.RowMajor + + batched = lambda x : len(x.shape) > 2 and prod(x.shape[:-2]) == batch_count + + if batched(A) and not batched(B) and batched(C) and A_row and C_row: + M *= batch_count + returned_batch_count = 1 + elif not batched(A) and batched(B) and batched(C) and not B_row and not C_row: + N *= batch_count + returned_batch_count = 1 + else: + mode = GemmUniversalMode.Batched + + return GemmCoord(M, N, K), mode, returned_batch_count + + def _verify_type_and_layout(self, tensor, ref_type, ref_layout, name): + """ + Verifies that ``tensor`` has data type ``ref_type`` and layout ``ref_layout``. An exception + is raised if it does not. + + :param tensor: object representing a tensor passed in to verify, or ``None`` if no tensor was passed in + :type tensor: numpy/cupy/torch array/tensor object + :param ref_dtype: data type for the tensor that this object was initialized to + :param ref_layout: layout for the tensor that this object was initialized to + :param name: identifier of the tensor to verify. Used in raising exceptions + :type name: str + """ + dtype, layout = datatypes.get_datatype_and_layout(tensor) + if dtype != ref_type or layout != ref_layout: + raise Exception(f'Tensor {name} with type and layout ({dtype}, {layout}) ' + f'does not match the expected type and ' + f'layout of ({ref_type}, {ref_layout}).') + + def run(self, A=None, B=None, C=None, D=None, + alpha=None, beta=None, sync: bool = True, print_module: bool = False, visitor_args: dict = None) -> GemmArguments: + """ + Runs the kernel currently specified. If it has not already been, the kernel is emitted and + compiled. Tensors holding operands and outputs of the kernel are sourced either from the + ``A``, ``B``, ``C``, ``D``, ``alpha``, and ``beta`` + parameters provided in this call, or from those + passed in on the construction of this object -- one of the two must be specified. + + By default, this call returns only once the kernel has completed. To launch the kernel + and immediately return, set ``sync=False``. In this case, it is the responsibility of the + caller to syncrhonize the results of the kernel before attempting to access outputs + by calling ``sync()`` on the arguments returned from this call. 
+ + :param A: tensor representing data type and layout of operand A + :param B: tensor representing data type and layout of operand B + :param C: tensor representing data type and layout of operand C + :param D: tensor representing data type and layout of operand D + :param alpha: scalar paramter alpha from GEMM computation that scales the product of operands A and B + :param beta: scalar parameter beta from GEMM operation that scales operand C + :param sync: whether the call should wait for the kernel to complete before returning + :type sync: bool + :param print_module: whether to print the emitted C++ code + :type print_module: bool + + :return: arguments passed in to the kernel + :rtype: cutlass.backend.GemmArguments + """ + A = self._verify_tensor(A, self.A, self._element_a, self._layout_a, "A") + B = self._verify_tensor(B, self.B, self._element_b, self._layout_b, "B") + C = self._verify_tensor(C, self.C, self._element_c, self._layout_c, "C") + D = self._verify_tensor(D, self.D, self._element_d, self._layout_d, "D") + alpha = self._verify_scalar(alpha, self.alpha, self._element_c, "alpha") + beta = self._verify_scalar(beta, self.beta, self._element_c, "beta") + + self._verify_rank(A) + self._verify_rank(B) + self._verify_rank(C) + self._verify_rank(D) + + alignment_a = self.possible_operations.find_alignment(A.shape, self._layout_a) + alignment_b = self.possible_operations.find_alignment(B.shape, self._layout_b) + alignment_c = self.possible_operations.find_alignment(C.shape, self._layout_c) + self.compile(self._tile_description, alignment_A=alignment_a, alignment_B=alignment_b, + alignment_C=alignment_c, print_module=print_module) + + problem_size, mode, batch_count = self._get_problem_args(A, B, C, D) + + if mode == GemmUniversalMode.Gemm or batch_count == 1: + kwargs = {'split_k_slices': 1} + else: + kwargs = { + 'batch': batch_count, + 'batch_strides': { + 'A': self._get_batch_stride(A), + 'B': self._get_batch_stride(B), + 'C': self._get_batch_stride(C), + 'D': self._get_batch_stride(D) + } + } + + if isinstance(self.epilogue_functor, EpilogueFunctorVisitor): + output_op = self.operation.epilogue_type(visitor_args) + else: + output_op = self.operation.epilogue_type(alpha, beta) + + arguments = GemmArguments( + operation=self.operation, problem_size=problem_size, + A=A, B=B, C=C, D=D, + output_op=output_op, + gemm_mode=mode, + **kwargs + ) + + self.operation.run(arguments) + + if sync: + arguments.sync() + + return arguments diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/op/gemm_grouped.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/op/gemm_grouped.py new file mode 100644 index 0000000000000000000000000000000000000000..bc8c98693e44d589fbcd1dc34208b05ecfaecbe0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/op/gemm_grouped.py @@ -0,0 +1,257 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. 
Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################################# + +""" + Ease-of-use interface for constructing, compiling, and running GEMMs. + + The ``GroupedGemm`` interface is meant to allow one to easily instantiate, compile, and run + grouped GEMM operations in CUTLASS via Python, without specifying many configuration parameters. + Under the hood, the interface will select sensible default parameters for the many template + parameters for CUTLASS grouped GEMMs. + + Note: optimal performance is not to be expected from this interface. To achieve optimal + performance, one should specify and tune each configuration parameter. + + The simplest example of using this interface is the following: + + .. highlight:: python + .. code-block:: python + + # As, Bs, Cs, and Ds are torch/numpy/cupy tensor objects + plan = cutlass.op.GroupedGemm(element=cutlass.DataType.f16, layout=cutlass.LayoutType.RowMajor) + plan.run([A0, A1], [B0, B1], [C0, C1], [D0, D1]) +""" + +from cutlass import DataTypeSize +from cutlass.backend.gemm_operation import ( + GemmGroupedArguments, + GemmOperationGrouped, +) +from cutlass.backend.library import ( + SchedulerMode, + TensorDescription, + TileDescription, +) +from cutlass.op.gemm import Gemm +from cutlass.shape import GemmCoord +from cutlass.utils import check, datatypes + + +class GroupedGemm(Gemm): + """ + Constructs a ``GroupedGemm`` object. + + The data types and layouts of operands A, B, and C, along with the data type of output D + and that used for accumulation, are bound to the ``GroupedGemm`` object throughout its lifetime -- + these are not to be changed after a ``GroupedGemm`` has been constructed. + + The constructor has optional parameters for flexibly setting these parameters. Please see the constructor + for ``Gemm`` for examples of these. 
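A slightly fuller usage sketch than the two-line example in the module docstring, showing groups with different problem sizes. The shapes, dtypes, and use of torch on a CUDA device are illustrative assumptions:

.. code-block:: python

    import cutlass
    import torch

    # Two groups with different problem sizes (shapes are illustrative).
    sizes = [(128, 256, 64), (96, 128, 32)]
    As, Bs, Cs, Ds = [], [], [], []
    for m, n, k in sizes:
        As.append(torch.rand((m, k), dtype=torch.float16, device="cuda"))
        Bs.append(torch.rand((k, n), dtype=torch.float16, device="cuda"))
        Cs.append(torch.zeros((m, n), dtype=torch.float16, device="cuda"))
        Ds.append(torch.zeros((m, n), dtype=torch.float16, device="cuda"))

    plan = cutlass.op.GroupedGemm(element=cutlass.DataType.f16,
                                  layout=cutlass.LayoutType.RowMajor)
    arguments = plan.run(As, Bs, Cs, Ds)   # blocks until the grouped kernel finishes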
+ + :param cc: compute capability of device to generate kernels for + :type cc: int + :param A: tensor representing data type and layout of operands A + :param B: tensor representing data type and layout of operands B + :param C: tensor representing data type and layout of operands C + :param D: tensor representing data type and layout of operands D + :param alpha: scalar paramter alpha from GEMM computation that scales the product of operands A and B + :param beta: scalar parameter beta from GEMM operation that scales operand C + :param element_accumulator: data type to be used in accumulation of the product of operands A and B + :type element_accumulator: cutlass.DataType + :param element: generic data type to be used for operands A, B, C, D, as well as the accumulation data type + :type element: cutlass.DataType + :param layout: generic layout type to be used for operands A, B, C, and D + :type layout: cutlass.LayoutType + :param element_A: data type to be used for operand A + :type element_A: cutlass.DataType + :param element_B: data type to be used for operand B + :type element_B: cutlass.DataType + :param element_C: data type to be used for operand C + :type element_C: cutlass.DataType + :param element_D: data type to be used for operand D + :type element_D: cutlass.DataType + :type layout_A: layout of operand A + :param layout_A: cutlass.LayoutType + :type layout_B: layout of operand B + :param layout_B: cutlass.LayoutType + :type layout_C: layout of operand C + :param layout_C: cutlass.LayoutType + :type layout_D: layout of operand D + :param layout_D: cutlass.LayoutType + """ + + def __init__( + self, A=None, B=None, C=None, D=None, + alpha=1.0, beta=0.0, element_accumulator=None, + element=None, layout=None, + element_A=None, element_B=None, element_C=None, element_D=None, + layout_A=None, layout_B=None, layout_C=None, + cc: int = None, + ): + super().__init__( + A=A, B=B, C=C, D=D, + alpha=alpha, beta=beta, + element_accumulator=element_accumulator, + element=element, layout=layout, + element_A=element_A, element_B=element_B, + element_C=element_C, element_D=element_D, + layout_A=layout_A, layout_B=layout_B, layout_C=layout_C, + cc=cc + ) + + # Grouped GEMM specializations for SM90 are currently unavailable. Revert to using SM80 + if self.current_cc == 90: + self._reset_options(80) + self._reset_operations(reset_epilogue=False) + + self.name = "grouped_gemm" + + @Gemm.swizzling_functor.setter + def swizzling_functor(self, swizzling_functor): + """ + Sets the swizzling functor to the type specified by `swizzling_functor` + """ + raise Exception('Grouped GEMM does not currently support different swizzling functors') + + def construct(self, tile_description: TileDescription = None, + alignment_A: int = None, + alignment_B: int = None, + alignment_C: int = None) -> GemmOperationGrouped: + """ + Constructs a ``cutlass.backend.GemmOperationGrouped`` based on the input parameters and current + kernel specification of the ``Gemm`` object. 
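Because the per-operand parameters listed above are forwarded to the ``Gemm`` base constructor, data types and layouts can also be set individually. A hedged configuration sketch (the specific type/layout combination below is illustrative, not a recommendation from the source):

.. code-block:: python

    import cutlass

    # Hypothetical mixed-precision setup: fp16 inputs, fp32 accumulation and output.
    plan = cutlass.op.GroupedGemm(
        element_A=cutlass.DataType.f16, layout_A=cutlass.LayoutType.RowMajor,
        element_B=cutlass.DataType.f16, layout_B=cutlass.LayoutType.ColumnMajor,
        element_C=cutlass.DataType.f32, layout_C=cutlass.LayoutType.RowMajor,
        element_D=cutlass.DataType.f32,
        element_accumulator=cutlass.DataType.f32,
    )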
+ + :param tile_description: tile description specifying shapes and operand types to use in the kernel + :type tile_description: cutlass.backend.TileDescription + :param alignment_A: alignment of operand A + :type alignment_A: int + :param alignment_B: alignment of operand B + :type alignment_B: int + :param alignment_C: alignment of operand C + :type alignment_C: int + + :return: operation that was constructed + :rtype: cutlass.backend.GemmOperationGrouped + """ + alignment_preference = max(self.possible_operations.alignments) + alignment_A = check.alignment_or_default(alignment_A, alignment_preference) + alignment_B = check.alignment_or_default(alignment_B, alignment_preference) + alignment_C = check.alignment_or_default(alignment_C, alignment_preference) + + self.epilogue_functor = self._reset_epilogue_functor_alignment(alignment_C, self.epilogue_functor) + + tensor_A = TensorDescription(self._element_a, self._layout_b, alignment_A) + tensor_B = TensorDescription(self._element_b, self._layout_b, alignment_B) + tensor_C = TensorDescription(self._element_c, self._layout_c, alignment_C) + + if tile_description is None: + op = self.possible_operations.operations(alignment_A)[0] + tile_description = datatypes.td_from_profiler_op(op) + else: + valid, err_str = self._valid_tile_description(tile_description) + if not valid: + raise Exception(f"Invalid tile description. {err_str}") + self.tile_description = tile_description + + operation = GemmOperationGrouped( + arch=self.current_cc, + tile_description=tile_description, + A=tensor_A, B=tensor_B, C=tensor_C, + epilogue_functor=self.epilogue_functor, + swizzling_functor=self._swizzling_functor, + precompute_mode=SchedulerMode.Device) + + return operation + + def run(self, A, B, C, D, + alpha=None, beta=None, sync: bool = True, + print_module: bool = False) -> GemmGroupedArguments: + """ + Runs the kernel currently specified. + + By default, this call returns only once the kernel has completed. To launch the kernel + and immediately return, set ``sync=False``. In this case, it is the responsibility of the + caller to syncrhonize the results of the kernel before attempting to access outputs + by calling ``sync()`` on the arguments returned from this call. 
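A sketch of the asynchronous-launch pattern described above, combined with per-call ``alpha``/``beta`` scalars. It assumes ``plan``, ``As``, ``Bs``, ``Cs``, and ``Ds`` were set up as in the earlier grouped-GEMM sketch, and the torch reference check is an illustrative assumption rather than part of the interface:

.. code-block:: python

    import torch

    # Assumes `plan`, As, Bs, Cs, Ds from the earlier GroupedGemm sketch.
    arguments = plan.run(As, Bs, Cs, Ds, alpha=0.5, beta=1.0, sync=False)

    # ... unrelated host-side work can overlap with the kernel here ...

    arguments.sync()   # wait for the grouped kernel before reading the outputs
    for A, B, C, D in zip(As, Bs, Cs, Ds):
        ref = 0.5 * (A @ B) + 1.0 * C
        assert torch.allclose(D, ref.to(D.dtype), atol=1e-2)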
+ + :param A: list of tensors representing data type and layout of operand A + :type A: list + :param B: list of tensors representing data type and layout of operand B + :type B: list + :param C: list of tensors representing data type and layout of operand C + :type C: list + :param D: list of tensors representing data type and layout of operand D + :type D: list + :param alpha: scalar paramter alpha from GEMM computation that scales the product of operands A and B + :param beta: scalar parameter beta from GEMM operation that scales operand C + :param sync: whether the call should wait for the kernel to complete before returning + :type sync: bool + :param print_module: whether to print the emitted C++ code + :type print_module: bool + + :return: arguments passed in to the kernel + :rtype: cutlass.backend.GemmGroupedArguments + """ + if len(A) != len(B) or len(A) != len(C) or len(A) != len(D): + raise Exception("Lengths of A, B, C, and D lists must be equal") + + problem_sizes = [] + As, Bs, Cs, Ds = ([None] * len(A) for _ in range(4)) + for i in range(len(A)): + As[i] = self._verify_tensor(A[i], self.A, self._element_a, self._layout_a, "A") + Bs[i] = self._verify_tensor(B[i], self.B, self._element_b, self._layout_b, "B") + Cs[i] = self._verify_tensor(C[i], self.C, self._element_c, self._layout_c, "C") + Ds[i] = self._verify_tensor(D[i], self.D, self._element_d, self._layout_d, "D") + problem_sizes.append(GemmCoord(A[i].shape[0], B[i].shape[1], A[i].shape[1])) + + alpha = self._verify_scalar(alpha, self.alpha, self._element_c, "alpha") + beta = self._verify_scalar(beta, self.beta, self._element_c, "beta") + + alignment_a = min((self.possible_operations.find_alignment(A.shape, self._layout_a) for A in As)) + alignment_b = min((self.possible_operations.find_alignment(B.shape, self._layout_b) for B in Bs)) + alignment_c = min((self.possible_operations.find_alignment(C.shape, self._layout_c) for C in Cs)) + self.compile(self.tile_description, alignment_A=alignment_a, alignment_B=alignment_b, + alignment_C=alignment_c, print_module=print_module) + + arguments = GemmGroupedArguments( + operation=self.operation, + problem_sizes=problem_sizes, + A=As, B=Bs, C=Cs, D=Ds, + output_op=self.operation.epilogue_type(alpha, beta) + ) + + self.operation.run(arguments) + + if sync: + arguments.sync() + + return arguments diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/op/op.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/op/op.py new file mode 100644 index 0000000000000000000000000000000000000000..7bd0e545e9890c18d66462160e1d81ca8b1da570 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/op/op.py @@ -0,0 +1,378 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. 
Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################################# + +""" +Base operation used for defining high-level CUTLASS operations (e.g., GEMM, Conv2d) +""" + +from bisect import bisect_left + +import cutlass +from cutlass import option_registry, epilogue +from cutlass.backend.evt import EpilogueFunctorVisitor +from cutlass.backend.utils.device import device_cc +from cutlass.epilogue import get_activations +from cutlass.library_defaults import KernelsForDataType, _generator_ccs +from cutlass.swizzle import get_swizzling_functors +from cutlass.utils import datatypes, check + + +class OperationBase: + """ + Base operation used for defining high-level CUTLASS operations (e.g., GEMM, Conv2d) + """ + + def __init__(self, cc: int = None, kernel_cc: int = None, operation_kind = cutlass.OperationKind.Gemm): + """ + :param cc: compute capability of device for which kernels should be compiled. For example, if running on H100, this should be set to 90 + :type cc: int + :param kernel_cc: compute capability of kernels to generate. 
For example, if running on SM90, but desiring to use a CUTLASS 2.x-style Ampere kernel, this should be set to 80 + :type kernel_cc: int + """ + self.operation_kind = operation_kind + self.cc = cc if cc is not None else device_cc() + self.specified_kernel_cc = kernel_cc is not None + self.current_cc = kernel_cc if kernel_cc is not None else self._find_closest_cc(self.cc) + self.tile_description = None + + self.options = option_registry.options_for_cc(self.current_cc, operation_kind) + + if self.options is None: + raise Exception(f"Invalid or unsupported compute capability: {self.current_cc}") + + # Default activation function: identity + self._activation = epilogue.identity + + def _find_closest_cc(self, cc: int) -> int: + """ + Returns the closest CC in _generator_ccs less than or equal to `cc` + + :param cc: compute capability to query + :type cc: int + + :returns: closest CC in _generator_ccs less than or equal to `cc` + :rtype: int + """ + if cc in _generator_ccs: + return cc + + # Find closest CC lower than this CC + idx = bisect_left(_generator_ccs, cc) + if idx == 0: + raise Exception(f'No valid CC to fall back to for {cc}') + return _generator_ccs[idx-1] + + def activations(self) -> list: + """ + Returns possible activation functions that can be used + + :return: list of activation functions that can be used + :rtype: list + """ + return get_activations() + + def swizzling_functors(self) -> list: + """ + Returns possible swizzling functions that can be used + + :return: list of swizzling functions that can be used + :rtype: list + """ + return get_swizzling_functors() + + def _reset_options(self, cc: int): + """ + Resets the kernel options based on cc + + :param cc: compute capability to reset to + :type cc: int + """ + if cc != self.current_cc: + if cc not in _generator_ccs: + raise Exception(f'Invalid CC for CUTLASS kernels: {cc}.') + self.current_cc = cc + self.options = option_registry.options_for_cc(self.current_cc, self.operation_kind) + + def _verify_scalar(self, scalar, ref_scalar, ref_dtype, name): + """ + Verifies the following properties: + 1) Either ``scalar`` or ``ref_scakar`` must be set (i.e., not ``None``) + 2) If ``scalar`` is not ``None``, its datatype must match matches the current version + set by the plan (i.e., those in ``ref_dtype``) + + If either of these properties does not hold, an exception is raised. If these properties hold and + ``scalar`` is not ``None``, ``scalar`` is returned. Otherwise, ``ref_scalar`` is returned. + + :param scalar: object representing a tensor passed in to verify, or ``None`` if no tensor was passed in + :type scalar: numpy/cupy/torch scalar + :param ref_scalar: object representing a tensor passed in on construction of this object, or ``None`` if no tensor was passed in + :type ref_scalar: numpy/cupy/torch scalar + :param ref_dtype: data type for the scalar that this object was initialized to + :param name: identifier of the scalar to verify. Used in raising exceptions + :type name: str + + :return: valid scalar to use + :rtype: numpy/cupy/torch scalar + """ + if scalar is None: + if ref_scalar is None: + raise Exception(f"Scalar {name} must be set.") + return ref_scalar + if hasattr(scalar, "dtype"): + dtype = datatypes.library_type(scalar.dtype) + if dtype != ref_dtype: + raise Exception( + f"Tensor {name} with type {dtype} does not match expected type {ref_dtype}." 
+ ) + return scalar + + def _verify_tensor(self, tensor, ref_tensor, ref_dtype, ref_layout, name): + """ + Verifies the following properties: + 1) Either ``tensor`` or ``ref_tensor`` must be set (i.e., not ``None``) + 2) If ``tensor`` is not ``None``, its datatype and layout must match matches the current versions + set by the plan (i.e., those in ``ref_dtype`` and ``ref_layout``) + + If either of these properties does not hold, an exception is raised. If these properties hold and + ``tensor`` is not ``None``, ``tensor`` is returned. Otherwise, ``ref_tensor`` is returned. + + :param tensor: object representing a tensor passed in to verify, or ``None`` if no tensor was passed in + :type tensor: numpy/cupy/torch array/tensor object + :param ref_tensor: object representing a tensor passed in on construction of this object, or ``None`` if no tensor was passed in + :type ref_tensor: numpy/cupy/torch array/tensor object + :param ref_dtype: data type for the tensor that this object was initialized to + :param ref_layout: layout for the tensor that this object was initialized to + :param name: identifier of the tensor to verify. Used in raising exceptions + :type name: str + + :return: valid tensor object to use + :rtype: numpy/cupy/torch array/tensor object + """ + if tensor is None: + if ref_tensor is None: + raise Exception(f"Tensor {name} must be set.") + return ref_tensor + + self._verify_type_and_layout(tensor, ref_dtype, ref_layout, name) + return tensor + + # + # Opcode Related + # + + @property + def opclass(self) -> cutlass.OpcodeClass: + """ + Returns the opcode class currently in use by the GEMM + + :return: opcode class currently in use + :rtype: cutlass.OpcodeClass + """ + return self.op_class + + @opclass.setter + def opclass(self, oc: cutlass.OpcodeClass): + if isinstance(oc, str): + oc = datatypes.getattr_enum(cutlass.OpcodeClass, oc) + if oc in self.possible_op_classes: + self.op_class = oc + else: + raise Exception( + f'Unsupported operation class {oc} for CC {self.cc} and data type combination ' + f'({self._element_a}, {self._element_b}, {self._element_accumulator}) and ' + f'layout combination ({self._layout_a}, {self._layout_b}).') + + # Changing the op class changes the elements per access in the epilogue. Reset this. + if self.op_class == cutlass.OpcodeClass.Simt: + elements_per_access = 1 + else: + elements_per_access = 128 // cutlass.DataTypeSize[self._element_c] + + if self.epilogue_functor is not None: + self.epilogue_functor = self._reset_epilogue_functor_alignment(elements_per_access, self.epilogue_functor) + + # Changing the op class also changes the possible operations available. Reset these. + self.possible_operations = self.options.operations( + self.op_class, self._element_a, self._element_b, + self._element_accumulator, self._layout_a, self._layout_b) + + # + # Epilogue + # + + def _create_epilogue_functor_activation(self, activation): + """ + Returns the epilogue functor with given activation function + """ + if self.epilogue_functor is None: + if self.op_class == cutlass.OpcodeClass.Simt: + elements_per_access = 1 + else: + elements_per_access = 128 // cutlass.DataTypeSize[self._element_c] + else: + elements_per_access = self.epilogue_functor.epilogue_vector_length + + if not self.specified_kernel_cc: + if self.current_cc == 90 and activation != epilogue.identity: + # CUTLASS 3.0 kernels currently only support identity activation. If one requests a non-identity activation, + # revert to using a CUTLASS 2.x kernel by using SM80-tagged kernels. 
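The vector-width arithmetic used in the ``opclass`` setter and epilogue construction above (one 128-bit access worth of elements, except for SIMT kernels) can be written out directly. A small sketch, assuming ``DataType`` and ``OpcodeClass`` are importable from the top-level ``cutlass`` package as used elsewhere in this file:

.. code-block:: python

    from cutlass import DataType, DataTypeSize, OpcodeClass

    def elements_per_access(op_class, element_c):
        # SIMT kernels use scalar accesses; tensor-op kernels use 128-bit accesses.
        if op_class == OpcodeClass.Simt:
            return 1
        return 128 // DataTypeSize[element_c]

    print(elements_per_access(OpcodeClass.TensorOp, DataType.f16))  # 128 / 16 -> 8
    print(elements_per_access(OpcodeClass.TensorOp, DataType.f32))  # 128 / 32 -> 4
    print(elements_per_access(OpcodeClass.Simt, DataType.f32))      # 1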
+ cutlass.logger.warning("Reverting to using SM80-tagged kernel. Opclass may change.") + self._reset_options(80) + self._reset_operations(reset_epilogue=False) + elif (self.cc == 90 and self.current_cc != 90 and activation == epilogue.identity): + # SM80 fallback kernels are currently used. Since an identity activation is requested, + # we can switch back to using SM90 kernels. + self._reset_options(90) + self._reset_operations(reset_epilogue=False) + else: + if self.current_cc == 90 and activation != epilogue.identity: + raise Exception("Epilogues with elementwise fusion are not currently supported " + "in the Python interface for 3.x kernels. To use 2.x kernels " + "with fused elementwise epilogues, do not set the `kernel_cc` " + "parameter when constructing the Gemm object.") + + return epilogue.get_activation_epilogue( + activation, + self._element_c, + elements_per_access, + self._element_accumulator, + self._element_accumulator, + ) + + def _reset_epilogue_functor_activation(self, activation): + """ + Set the epilogue functor based on the provided activation function + """ + self.epilogue_functor = self._create_epilogue_functor_activation(activation) + + def _reset_epilogue_functor_alignment(self, alignment, epilogue_functor): + """ + Reset the alignment of the current epilogue functor based on alignment C + """ + if isinstance(epilogue_functor, EpilogueFunctorVisitor): + return epilogue_functor + + if epilogue_functor is None or not hasattr(epilogue_functor, 'activation_functor'): + # Identity epilogue does not have 'activation_functor' + activation = epilogue.identity + else: + activation = epilogue_functor.activation_functor + + epilogue_functor = epilogue.get_activation_epilogue( + activation, + self._element_c, + alignment, + self._element_accumulator, + self._element_accumulator, + ) + return epilogue_functor + + @property + def activation(self): + """ + Returns the type of the current activation function used + """ + if hasattr(self.epilogue_functor, "activation_functor"): + return self.epilogue_functor.activation_functor + else: + return epilogue.identity + + @activation.setter + def activation(self, act): + """ + Sets the type of the activation function to use + Activation can come with a set of arguments + + :param act: type of activation function to use + :type act: str or tuple. e.g. 
"relu", ("leaky_relu", 0.01) + + """ + if isinstance(act, tuple): + if isinstance(act[0], str): + act_fn = getattr(cutlass.backend.epilogue, act[0]) + else: + act_fn = act[0] + self._reset_epilogue_functor_activation(act_fn) + self._activation_args = act[1] + self._activation = act[0] + else: + if isinstance(act, str): + act = getattr(cutlass.backend.epilogue, act) + self._reset_epilogue_functor_activation(act) + self._activation = act + + @property + def epilogue_visitor(self): + """ + Return the epilogue functor + """ + return self.epilogue_functor + + @epilogue_visitor.setter + def epilogue_visitor(self, visitor): + """ + Create the epilogue visitor + """ + self.epilogue_functor = EpilogueFunctorVisitor(self.cc, visitor) + + # The epilogue_functor may consume too much shared memory + # Reset the possible operations + if self.cc != 90: + # The shared memory is only a concern for sm90 epilogue + # In sm80, the epilogue and mainloop share the shared memory + return + datatype_comb = self.possible_operations.datatype_comb + layout_comb = self.possible_operations.layout_comb + new_possible_operations = KernelsForDataType(datatype_comb, layout_comb) + for operation in self.possible_operations.all_operations: + td = datatypes.td_from_profiler_op(operation) + # Filter invalid epilogue schedules + if td.epilogue_schedule not in [ + cutlass.EpilogueScheduleType.TmaWarpSpecialized, + cutlass.EpilogueScheduleType.TmaWarpSpecializedCooperative]: + continue + epilogue_smem_bytes = self.epilogue_functor.get_smem_size(td) + + # Verify the maximum number of mainloop stages + mainloop_smem_per_stage = check.calculate_smem_usage_per_stage(td, cutlass.OperationKind.Gemm) + smem_capacity_bytes = cutlass.SharedMemPerCC[self.cc] << 10 + mainloop_stages = (smem_capacity_bytes - epilogue_smem_bytes) // mainloop_smem_per_stage + if mainloop_stages < 2: + # Mainloop stages must >= 2 + continue + + new_possible_operations.add(operation) + if len(new_possible_operations.all_operations) == 0: + raise RuntimeError( + "The epilogue consumes too much shared memory. " + "No valid tile description is found in the generator.") + self.possible_operations = new_possible_operations diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/profiler/__init__.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/profiler/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d9e9cdc854d9040b7031397e410a10e20da03efc --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/profiler/__init__.py @@ -0,0 +1,37 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. 
Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################################# + +""" +Profilers for Python Interface +""" + +from cutlass.profiler.event_profiler import CUDAEventProfiler diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/profiler/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/profiler/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..18ebeb9c6bd00c8b410955111cd5dee3ea26554c Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/profiler/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/profiler/__pycache__/event_profiler.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/profiler/__pycache__/event_profiler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db5af8c058ad3151ebdd531aeb1fc91f48155824 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/profiler/__pycache__/event_profiler.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/profiler/event_profiler.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/profiler/event_profiler.py new file mode 100644 index 0000000000000000000000000000000000000000..71f290c12020d8bf51bef88a756591f21b5e627a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/profiler/event_profiler.py @@ -0,0 +1,185 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. 
Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################################# + +""" +Profiler based on the cuda events +""" + +import re +import subprocess + +from cuda import cuda, cudart +import numpy as np +import torch + +from cutlass import CUTLASS_PATH +from cutlass.backend.library import DataTypeSize +from cutlass.op.op import OperationBase +from cutlass.shape import GemmCoord + + +class GpuTimer: + def __init__(self) -> None: + self.events = [ + cuda.cuEventCreate(cuda.CUevent_flags.CU_EVENT_DEFAULT)[1], + cuda.cuEventCreate(cuda.CUevent_flags.CU_EVENT_DEFAULT)[1], + ] + + def start(self, stream=cuda.CUstream(0)): + (err,) = cuda.cuEventRecord(self.events[0], stream) + if err != cuda.CUresult.CUDA_SUCCESS: + raise RuntimeError(f"CUDA Error {str(err)}") + + def stop(self, stream=cuda.CUstream(0)): + (err,) = cuda.cuEventRecord(self.events[1], stream) + if err != cuda.CUresult.CUDA_SUCCESS: + raise RuntimeError(f"CUDA Error {str(err)}") + pass + + def stop_and_wait(self, stream=cuda.CUstream(0)): + self.stop(stream) + if stream: + (err,) = cuda.cuStreamSynchronize(stream) + if err != cuda.CUresult.CUDA_SUCCESS: + raise RuntimeError(f"CUDA Error {str(err)}") + else: + (err,) = cudart.cudaDeviceSynchronize() + if err != cuda.CUresult.CUDA_SUCCESS: + raise RuntimeError(f"CUDA Error {str(err)}") + + def duration(self, iterations=1): + err, duration = cuda.cuEventElapsedTime(self.events[0], self.events[1]) + if err != cuda.CUresult.CUDA_SUCCESS: + raise RuntimeError(f"CUDA Error {str(err)}") + return duration / float(iterations) + + +class CUDAEventProfiler: + def __init__(self, op: OperationBase, warmup_iterations: int=500, iterations: int=500, *args, **kwargs) -> None: + self.arguments = op.run(*args, **kwargs) + self.operation = op.operation + self.warmup_iterations = warmup_iterations + self.iterations = iterations + self.timer = GpuTimer() + + # + # Cutlass Python Interface Profiler + # + + def __call__(self): + for _ in range(self.warmup_iterations): + self.operation.run(self.arguments) + + self.timer.start() + for _ in range(self.iterations): + self.operation.run(self.arguments) + + self.timer.stop_and_wait() + runtime = self.timer.duration(self.iterations) + return runtime + + # + # CUTLASS Profiler + # + + def run_cutlass_profiler(self): + alpha = 1.0 + beta = 1.0 + + profiler_path = CUTLASS_PATH + "/build/tools/profiler/cutlass_profiler" + kernel_name = self.operation.procedural_name() + verification_providers = "device" + provider = "cutlass" + problem_size = self.arguments.problem_size + 
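A usage sketch for ``CUDAEventProfiler``: the constructor forwards any extra arguments to ``op.run`` once to build the kernel arguments, and calling the profiler replays the kernel and returns the event-timed average (milliseconds, as reported by ``cuEventElapsedTime``). The ``plan`` and operand tensors are assumed to come from one of the earlier GEMM sketches:

.. code-block:: python

    from cutlass.profiler import CUDAEventProfiler

    # `plan`, A, B, C, D as in the earlier GEMM examples (illustrative assumption).
    profiler = CUDAEventProfiler(plan, warmup_iterations=100, iterations=100,
                                 A=A, B=B, C=C, D=D)
    runtime_ms = profiler()   # average kernel time over `iterations`, in milliseconds
    print(f"avg runtime: {runtime_ms:.3f} ms")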
+ if "cutlass3x" in kernel_name: + # cutlass3x generator only have column-major output + layout_name = self.operation.layout_name_3x() + if layout_name[-1] == "t": + new_layout_name = "".join(["n" for l in layout_name if l == "t" or "t"]) + problem_size = GemmCoord(problem_size.n, problem_size.m, problem_size.k) + kernel_name = kernel_name.replace(layout_name, new_layout_name) + + batch_count = self.arguments.batch_count + + cmd = f"{profiler_path} --kernels={kernel_name} --verification-providers={verification_providers} " \ + f"--providers={provider} --m={problem_size.m()} --n={problem_size.n()} --k={problem_size.k()} " \ + f"--batch_count={batch_count} --alpha={alpha} --beta={beta} "\ + f"--warmup-iterations={self.warmup_iterations} --profiling-iterations={self.iterations}" + + result = subprocess.getoutput(cmd) + + m = re.search(r"Runtime:\s+(?P\d+.\d+)", result) + runtime = float(m.group("runtime")) + + m = re.search(r"Bytes:\s+(?P\d+)", result) + bytes = int(m.group("bytes")) + + m = re.search(r"FLOPs:\s+(?P\d+)", result) + flops = int(m.group("flops")) + + # check if the problem size matches + assert bytes == self.bytes(problem_size, batch_count, beta) + assert flops == self.flops(problem_size, batch_count, beta) + + return runtime + + def bytes(self, problem_size, batch_count=1, beta=0.0): + m = problem_size.m() + n = problem_size.n() + k = problem_size.k() + + bytes = ( + (DataTypeSize[self.operation.A.element] * m // 8) * k + + (DataTypeSize[self.operation.B.element] * n // 8) * k + + (DataTypeSize[self.operation.C.element] * m // 8) * n + ) + + if beta != 0: + bytes += (DataTypeSize[self.operation.C.element] * m // 8) * n + + bytes *= batch_count + + return bytes + + def flops(self, problem_size, batch_count=1, beta=0.0): + m = problem_size.m() + n = problem_size.n() + k = problem_size.k() + + flops_ = (m * n * k) * 2 * batch_count + + if beta != 0: + flops_ += m * n * batch_count * 2 + + return flops_ + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/shape.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/shape.py new file mode 100644 index 0000000000000000000000000000000000000000..78e164d76442f8ae4d8d6924b44acb6c091dc49a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/shape.py @@ -0,0 +1,184 @@ +################################################################################ +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################ + +""" +Utilities for expressing shapes +""" + +from cutlass import ( + ConvMode, + ConvKind, + LayoutType +) +from cutlass.backend.c_types import ( + Conv2DProblemSize_, + GemmCoord_, + GemmCoordBatched_ +) + + +class MatrixCoord: + def __init__(self, row, col): + self._row = row + self._col = col + + @property + def row(self): + return self._row + + @property + def column(self): + return self._col + + def leading_dimension(self, layout: LayoutType) -> int: + """ + Returns the leading dimension for a matrix with layout ``layout`` and shape provided by the MatrixCoord. + + :param layout: layout of matrix + :type layout: cutlass.LayoutType + + :returns: leading dimension + :rtype: int + """ + if layout == LayoutType.RowMajor: + return self._col + elif layout == LayoutType.ColumnMajor: + return self._row + else: + raise Exception(f'Unsupported layout for leading dimension calculation: {layout}') + + +class GemmCoord: + def __init__(self, m: int, n: int, k: int): + self._m = m + self._n = n + self._k = k + + @property + def m(self) -> int: + return self._m + + @property + def n(self) -> int: + return self._n + + @property + def k(self) -> int: + return self._k + + @property + def mk(self) -> MatrixCoord: + return MatrixCoord(self._m, self._k) + + @property + def mn(self) -> MatrixCoord: + return MatrixCoord(self._m, self._n) + + @property + def kn(self) -> MatrixCoord: + return MatrixCoord(self._k, self._n) + + @property + def ctype(self) -> GemmCoord_: + return GemmCoord_(self._m, self._n, self._k) + + def batched_ctype(self, batch_count: int) -> GemmCoordBatched_: + return GemmCoordBatched_(self._m, self._n, self._k, batch_count) + + +class Conv2DProblemSize: + def __init__( + self, n: int, h: int, w: int, c: int, + k: int, r: int, s: int, c_: int, + pad_h: int, pad_w: int, stride_h: int, stride_w: int, + dilation_h: int, dilation_w: int, mode: ConvMode=ConvMode.CrossCorrelation, + split_k_slices: int=1, groups: int=1): + + self.N = n + self.H = h + self.W = w + self.C = c + self.K = k + self.R = r + self.S = s + self.pad_h = pad_h + self.pad_w = pad_w + self.stride_h = stride_h + self.stride_w = stride_w + self.dilation_h = dilation_h + self.dilation_w = dilation_w + self.mode = int(mode) + self.split_k_slices = split_k_slices + self.groups = groups + self.P = ((h + pad_h * 2 - r * dilation_h) // stride_h) + 1 + self.Q = ((w + pad_w * 2 - s * dilation_w) // stride_w) + 1 + + @property + def ctype(self) -> Conv2DProblemSize_: + return Conv2DProblemSize_(self) + + def implicit_gemm_size(self, kind: ConvKind): + if kind == ConvKind.Fprop: + return GemmCoord( + self.N * self.P * self.Q, + self.K, + self.R * self.S * self.C // self.groups + ) + elif kind == ConvKind.Dgrad: + return GemmCoord( + self.N * self.H * self.W, + self.C, + self.R * self.S * self.K + ) + elif kind == ConvKind.Wgrad: + return GemmCoord( + self.K, + self.R * self.S * self.C, + self.N * self.P * self.Q + ) + + 
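A worked example of the ``Conv2DProblemSize`` output-extent formulas and the implicit-GEMM mapping above. The convolution dimensions chosen here (a 1x224x224x64 input with 128 3x3 filters, padding 1, unit stride and dilation) are illustrative:

.. code-block:: python

    from cutlass import ConvKind
    from cutlass.shape import Conv2DProblemSize

    # (n, h, w, c), (k, r, s, c), padding, stride, dilation
    ps = Conv2DProblemSize(1, 224, 224, 64,
                           128, 3, 3, 64,
                           1, 1, 1, 1,
                           1, 1)
    print(ps.P, ps.Q)   # ((224 + 2*1 - 3*1) // 1) + 1 = 224 for both

    gemm = ps.implicit_gemm_size(ConvKind.Fprop)
    print(gemm.m, gemm.n, gemm.k)   # 1*224*224 = 50176, 128, 3*3*64 = 576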
@staticmethod + def from_sizes(input_size, weight_size): + K, R, S, _ = weight_size + pad_h = R // 2 + pad_w = S // 2 + stride_h = 1 + stride_w = 1 + dilation_h = 1 + dilation_w = 1 + return Conv2DProblemSize( + *input_size, + *weight_size, + pad_h, pad_w, + stride_h, stride_w, + dilation_h, dilation_w + ) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/swizzle.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/swizzle.py new file mode 100644 index 0000000000000000000000000000000000000000..498ab74eb51eace0f2d883ae7d0f6ec0d6c2bbca --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/swizzle.py @@ -0,0 +1,65 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +################################################################################################# + +""" +Registry of swizzling functions +""" + +from cutlass import SwizzlingFunctor + + +IdentitySwizzle1 = SwizzlingFunctor.Identity1 +IdentitySwizzle2 = SwizzlingFunctor.Identity2 +IdentitySwizzle4 = SwizzlingFunctor.Identity4 +IdentitySwizzle8 = SwizzlingFunctor.Identity8 +HorizontalSwizzle = SwizzlingFunctor.Horizontal +ThreadblockSwizzleStreamK = SwizzlingFunctor.StreamK +StridedDgradIdentitySwizzle1 = SwizzlingFunctor.StridedDgradIdentity1 +StridedDgradIdentitySwizzle4 = SwizzlingFunctor.StridedDgradIdentity4 +StridedDgradHorizontalSwizzle = SwizzlingFunctor.StridedDgradHorizontal + + +_swizzling_functors = [ + IdentitySwizzle1, + IdentitySwizzle2, + IdentitySwizzle4, + IdentitySwizzle8, + HorizontalSwizzle, + ThreadblockSwizzleStreamK, + StridedDgradIdentitySwizzle1, + StridedDgradIdentitySwizzle4, + StridedDgradHorizontalSwizzle, +] + + +def get_swizzling_functors(): + return _swizzling_functors diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/utils/__init__.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f854804bf46f10f504e4f3689c82a373e2b8233c --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/utils/__init__.py @@ -0,0 +1,41 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +################################################################################################# + +from cutlass.utils.check import ( + alignment_or_default, + calculate_smem_usage, + calculate_smem_usage_per_stage, + valid_cluster_shape, + valid_schedule, + valid_stage_count, + update_alignment, +) diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/utils/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a8c62f0baf16af81684a84a49ffd5279cd901286 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/utils/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/utils/__pycache__/check.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/utils/__pycache__/check.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9bc4dd4e0f862e57550d00bdb5b8797c0d535868 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/utils/__pycache__/check.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/utils/__pycache__/datatypes.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/utils/__pycache__/datatypes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31912dca335a9b057adcc5a983d937b832904e48 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/utils/__pycache__/datatypes.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/utils/check.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/utils/check.py new file mode 100644 index 0000000000000000000000000000000000000000..1ca0eb8a8ccb9bb52edeeeddd6de085a0d9e6907 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/utils/check.py @@ -0,0 +1,263 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################################# + +""" +Utility functions for checking constraints on kernels and calculating kernel attributes +""" + +import ctypes + +import cutlass +from cutlass import DataTypeSize +from cutlass.backend.library import TileDescription + + +def calculate_smem_usage_per_stage(td: TileDescription, operation_kind: cutlass.OperationKind) -> int: + """ + Returns the amount of shared memory in bytes consumed in a single stage of a kernel. + + :param td: tile description to compute shared memory of + :type td: TileDescription + :param operation_kind: identifier for the type of operation being performed + :type operation_kind: cutlass.OperationKind + + :return: number of bytes of shared memory consumed by a single stage + :rtype: int + """ + m, n, k = td.threadblock_shape + + if operation_kind == cutlass.OperationKind.Gemm: + stage_barrier_bytes = 32 + return ( + (DataTypeSize[td.math_instruction.element_a] * m * k // 8) + + (DataTypeSize[td.math_instruction.element_b] * k * n // 8) + + stage_barrier_bytes + ) + else: + raise Exception(f"No available shared memory calculation for operation kind {operation.operation_kind}") + + +def calculate_smem_usage(operation) -> int: + """ + Returns the amount of shared memory in bytes consumed by a kernel. + + :return: number of bytes of shared memory consumed by the operation + :return: int + """ + _per_stage = calculate_smem_usage_per_stage(operation.tile_description, operation.operation_kind) + return _per_stage * operation.tile_description.stages + + +def valid_stage_count( + cc: int, + kernel_cc: int, + td: TileDescription, + element_C: cutlass.DataType = None, + element_D: cutlass.DataType = None) -> tuple: + """ + Checks whether a device with `cc` supports the number of stages within `tile_description`, both + based on raw limits on the number of stages and based on shared memory capacity + + :param cc: compute capability of device in question + :type cc: int + :param kernel_cc: compute capability that the kernel targets (corresponding to the arch::SMxy tag in CUTLASS) + :type kernel_cc: int + :param td: tile description to check + :type td: TileDescription + :param element_C: data type of operand C + :type element_C: cutlass.DataType + :param element_D: data type of operand D + :type element_D: cutlass.DataType + + :return: tuple with the first element indicating whether the provided tile description is + valid for the provided device and the second element being an error message + :rtype: tuple + """ + if kernel_cc == 90: + if (td.stages is None or td.stages == 0): + # Stage count of None or 0 for SM90 indicates that the CollectiveBuilder automatically + # determines the stage count to use. Thus, all settings are valid in these scenarios. 
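For concreteness, the per-stage shared-memory formula in ``calculate_smem_usage_per_stage`` above works out as follows for a common fp16 tile (the 128x128x32 threadblock shape and 3-stage mainloop are illustrative choices, not defaults from the source):

.. code-block:: python

    # Per-stage shared memory for a 128x128x32 threadblock tile with fp16 A and B:
    #   A tile:        16 bits * 128 * 32 / 8 = 8192 bytes
    #   B tile:        16 bits * 32 * 128 / 8 = 8192 bytes
    #   stage barrier:                            32 bytes
    bits_a = bits_b = 16
    m, n, k = 128, 128, 32
    per_stage = (bits_a * m * k // 8) + (bits_b * k * n // 8) + 32
    print(per_stage)        # 16416 bytes per stage
    print(per_stage * 3)    # 49248 bytes for a 3-stage mainloop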
+ return (True, "") + else: + cutlass.logger.warning( + "Setting an explicit stage count for SM90 kernels currently may " + "result in compilation errors if the combination of tile shape, " + "stage count, and shared memory requirement of the epilogue exceeds " + "the available shared memory per SM.") + + if td.stages <= 0: + return (False, f"Stage counts must be positive integers. Tile description has stage count of {td.stages}.") + + if cc < 80 and td.stages != 2: + return (False, f"Tile description has stage count of {td.stages}, " + f"but only 2 stages are supported on SM{cc}.") + + # The calculation below does not consider shared memory used by the epilogue and, thus, + # only catches cases in which the mainloop exceeds the device's shared memory capacity. + # This is not a concern for CUTLASS 2.x kernels, for which the shared memory of the + # mainloop and epilogue is shared. + smem_per_stage = calculate_smem_usage_per_stage(td, cutlass.OperationKind.Gemm) + smem_usage_mainloop = (smem_per_stage * td.stages) + smem_arch = cutlass.SharedMemPerCC[cc] << 10 + if smem_usage_mainloop > smem_arch: + return ( False, + "Configuration uses too much shared memory. Consider reducing stage count or tile shape.\n" + f"Details:\n" + f"Mainloop uses {smem_per_stage} bytes of shared memory per stage, and " + f"{td.stages} stages for a total of {smem_usage_mainloop} bytes.\n" + f"The maxmium amount of shared memory that can be used per block on CC {cc} is {smem_arch}.") + + return (True, "") + + +def valid_cluster_shape(cc: int, cluster_shape: list) -> tuple: + """ + Checks whether a device with `cc` supports a thread block cluster of shape `cluster_shape`. + + :param cc: compute capability of device in question + :type cc: int + :param cluster_shape: dimensions of thread block cluster shape to check + :type cluster_shape: list + + :return: tuple with the first element indicating whether the provided cluster shape is + valid for the provided device and the second element being an error message + :rtype: tuple + """ + + if cc < 90: + if cluster_shape != [1, 1, 1]: + return (False, + f"Cluster shape for pre-SM90 architectures must be [1, 1, 1]. Received cluster shape of " + f"{cluster_shape} for SM{cc}.") + else: + return (True, "") + + if len(cluster_shape) != 3: + return (False, + f"Cluster shapes must be rank-3. Received {cluster_shape} (rank {len(cluster_shape)}") + + if cluster_shape[2] != 1: + return (False, + "CUTLASS kernels currently require the third dimension of cluster shape to be 1. " + f"Received cluster shape of {cluster_shape}.") + + # The CUDA programming guide currently defines a maximum of 8 thread blocks per cluster + # as being portably supported (https://docs.nvidia.com/cuda/cuda-c-programming-guide/#thread-block-clusters). + # Current CUTLASS kernels only have non-unit cluster dimensions within the first two dimensions, + # so we check that the first two dimensions of the cluster shape do not exceed 8 thread blocks in total. + blocks_in_2d = cluster_shape[0] * cluster_shape[1] + if blocks_in_2d > 8: + return (False, + f"Thread block clusters with more than 8 thread blocks are currently unsupported on SM{cc}. 
" + f"Received cluster shape {cluster_shape}, which has {blocks_in_2d} thread blocks.") + return (True, "") + + +def valid_schedule( + cc: int, + kernel_schedule: cutlass.KernelScheduleType, + epilogue_schedule: cutlass.EpilogueScheduleType, + tile_scheduler: cutlass.TileSchedulerType) -> tuple: + """ + Checks that the kernel and epilogue schedules passed in are a valid combination for + a device of compute capability ``cc``. + + :param cc: compute capability of device in question + :type cc: int + :param kernel_schedule: kernel schedule type + :type kernel_schedule: cutlass.KernelScheduleType + :param epilogue_schedule: epilogue schedule type + :type epilogue_schedule: cutlass.EpilogueScheduleType + :param tile_scheduler: tile scheduler type + :type tile_scheduler: cutlass.TileSchedulerType + + :return: tuple with the first element indicating whether the provided schedules are + valid for the provided device and the second element being an error message + :rtype: tuple + """ + kernel_auto = (kernel_schedule == cutlass.KernelScheduleType.ScheduleAuto) + epilogue_auto = (epilogue_schedule == cutlass.EpilogueScheduleType.ScheduleAuto) + tile_scheduler_default = (tile_scheduler == cutlass.TileSchedulerType.Default) + if cc < 90 and not (kernel_auto and epilogue_auto and tile_scheduler_default): + return (False, "Non-default schedules are only supported on SM90 and beyond") + + if (kernel_auto and not epilogue_auto) or (not kernel_auto and epilogue_auto): + return (False, "Kernel and epilogue schedules must either both be auto or neither be auto") + + if not tile_scheduler_default: + if (tile_scheduler == cutlass.TileSchedulerType.StreamK) and (kernel_schedule != cutlass.KernelScheduleType.TmaWarpSpecializedCooperative): + return (False, "Stream-K tile scheduler is currently only supported with the cooperative kernel schedule") + return (True, "") + + +def alignment_or_default(alignment_provided: int, default_alignment: int) -> int: + """ + Returns `alignment_provided` if it is set, otherwise `default_alignment` and checks + that `alignment_provided` does not exceed `default_alignment`. + + :param alignment_provided: alignment preference specified. Can be None. + :type alignment_provided: int + :param default_alignment: alignment to use if `alignment_provided` is None + :type default_alignment: int + + :return: alignment to use + :rtype: int + """ + if alignment_provided is not None: + if alignment_provided > default_alignment: + raise Exception(f"Alignment {alignment_provided} exceeds the maximum supported of {default_alignment}.") + return alignment_provided + + return default_alignment + + +def update_alignment(alignment_provided:int, default_alignment: int) -> int: + """ + Returns `alignment_provided` if it is set, otherwise `default_alignment` and checks + that `alignment_provided` does not exceed `default_alignment`. + + :param alignment_provided: alignment preference specified. Can be None. 
+ :type alignment_provided: int + :param default_alignment: alignment to use if `alignment_provided` is None + :type default_alignment: int + + :return: alignment to use + :rtype: int + """ + if alignment_provided is not None: + if alignment_provided > default_alignment: + if alignment_provided % default_alignment == 0: + return default_alignment + raise Exception(f"Alignment {alignment_provided} exceeds the maximum supported of {default_alignment}.") + return alignment_provided + + return default_alignment diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/utils/datatypes.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/utils/datatypes.py new file mode 100644 index 0000000000000000000000000000000000000000..fa229557f5ed8861a34b36afd58b464d8b22d669 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass/utils/datatypes.py @@ -0,0 +1,302 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
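Editorial sketch (not part of the vendored diff): the check.py helpers above are plain Python and can be exercised directly, assuming the package in this diff is importable as `cutlass` with its CUDA dependencies available; the tile sizes and device limits in the comments are illustrative values taken from the formulas and the SharedMemPerCC table referenced above.

from cutlass.utils.check import alignment_or_default, valid_cluster_shape

# Pre-SM90 devices accept only a unit cluster; SM90 accepts small rank-3 clusters.
ok, msg = valid_cluster_shape(cc=80, cluster_shape=[2, 2, 1])
assert not ok and "pre-SM90" in msg
ok, msg = valid_cluster_shape(cc=90, cluster_shape=[2, 1, 1])
assert ok

# alignment_or_default falls back to the maximum supported alignment when none is given,
# and rejects a request that exceeds it.
assert alignment_or_default(None, 8) == 8
assert alignment_or_default(4, 8) == 4

# By the formula in calculate_smem_usage_per_stage, a hypothetical 128x128x64 FP16 GEMM tile
# consumes (16*128*64 // 8) + (16*64*128 // 8) + 32 = 32,800 bytes of shared memory per stage.
# Five stages (164,000 bytes) fit under the 163 KB (166,912-byte) SM80 budget that
# valid_stage_count derives from cutlass.SharedMemPerCC, while six stages do not.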
+# +################################################################################################# + +""" +Utility functions for converting between frontend datatypes and CUTLASS datatypes +""" + +import cutlass +from cutlass import ( + DataTypeSize, +) +from cutlass.backend.library import ( + MathInstruction, + MathOperation, + TileDescription, +) + +try: + import numpy as np + + numpy_available = True + _library_to_numpy_dict = { + cutlass.DataType.f16: np.float16, + cutlass.DataType.f32: np.float32, + cutlass.DataType.f64: np.float64, + cutlass.DataType.s8: np.int8, + cutlass.DataType.s32: np.int32, + } +except ImportError: + numpy_available = False + _library_to_numpy_dict = {} + + +def numpy_library_type(inp) -> cutlass.DataType: + if numpy_available: + if inp == np.float16: + return cutlass.DataType.f16 + elif inp == np.float32: + return cutlass.DataType.f32 + elif inp == np.float64: + return cutlass.DataType.f64 + elif inp == np.int8: + return cutlass.DataType.s8 + elif inp == np.int32: + return cutlass.DataType.s32 + return None + + +def numpy_type(inp): + return _library_to_numpy_dict.get(inp, None) + + +try: + import cupy as cp + + cupy_available = True + _library_to_cupy_dict = { + cutlass.DataType.f16: cp.float16, + cutlass.DataType.f32: cp.float32, + cutlass.DataType.f64: cp.float64, + cutlass.DataType.s8: cp.int8, + cutlass.DataType.s32: cp.int32, + } +except ImportError: + cupy_available = False + _library_to_cupy_dict = {} + + +def cupy_library_type(inp) -> cutlass.DataType: + if cupy_available: + if inp == cp.float16: + return cutlass.DataType.f16 + elif inp == cp.float32: + return cutlass.DataType.f32 + elif inp == cp.float64: + return cutlass.DataType.f64 + return None + + +def cupy_type(inp): + return _library_to_cupy_dict.get(inp, None) + + +try: + import torch + + torch_available = True + _torch_to_library_dict = { + torch.half: cutlass.DataType.f16, + torch.float16: cutlass.DataType.f16, + torch.bfloat16: cutlass.DataType.bf16, + torch.float: cutlass.DataType.f32, + torch.float32: cutlass.DataType.f32, + torch.double: cutlass.DataType.f64, + torch.float64: cutlass.DataType.f64, + torch.int8: cutlass.DataType.s8, + torch.int32: cutlass.DataType.s32, + torch.uint8: cutlass.DataType.u8, + } + + _library_to_torch_dict = { + cutlass.DataType.f16: torch.half, + cutlass.DataType.f16: torch.float16, + cutlass.DataType.bf16: torch.bfloat16, + cutlass.DataType.f32: torch.float, + cutlass.DataType.f32: torch.float32, + cutlass.DataType.f64: torch.double, + cutlass.DataType.f64: torch.float64, + cutlass.DataType.s8: torch.int8, + cutlass.DataType.s32: torch.int32, + cutlass.DataType.u8: torch.uint8, + } +except ImportError: + torch_available = False + _torch_to_library_dict = {} + _library_to_torch_dict = {} + + +def torch_library_type(inp) -> cutlass.DataType: + return _torch_to_library_dict.get(inp, None) + + +def torch_type(inp): + return _library_to_torch_dict.get(inp, None) + + +try: + import bfloat16 + + bfloat16_available = True +except ImportError: + bfloat16_available = False + + +def bfloat16_library_type(inp) -> cutlass.DataType: + if bfloat16_available: + if inp == bfloat16.bfloat16: + return cutlass.DataType.bf16 + + +def bfloat16_type(inp): + if bfloat16_available: + if inp == cutlass.DataType.bf16: + return bfloat16.bfloat16 + + +def library_type(inp): + if inp in cutlass.DataTypeSize.keys(): + return inp + + for cvt_fn in [ + bfloat16_library_type, + cupy_library_type, + numpy_library_type, + torch_library_type, + ]: + out = cvt_fn(inp) + if out is not None: + 
return out + + raise Exception(f"No available conversion from type {inp} to a library type.") + + +def _tensor_from_numpy(np_tensor): + dtype = library_type(np_tensor.dtype) + if np_tensor.flags.c_contiguous: + layout = cutlass.LayoutType.RowMajor + elif np_tensor.flags.f_contiguous: + layout = cutlass.LayoutType.ColumnMajor + return (dtype, layout) + + +def _tensor_from_torch(pt_tensor): + dtype = library_type(pt_tensor.dtype) + return (dtype, cutlass.LayoutType.RowMajor) + + +def get_datatype_and_layout(tensor): + if (numpy_available and isinstance(tensor, np.ndarray)) or ( + cupy_available and isinstance(tensor, cp.ndarray) + ): + return _tensor_from_numpy(tensor) + elif torch_available and isinstance(tensor, torch.Tensor): + return _tensor_from_torch(tensor) + elif isinstance(tensor, float) or isinstance(tensor, int): + return (cutlass.DataType.f32, cutlass.LayoutType.RowMajor) + else: + raise Exception(f"Unable to convert tensor of type {type(tensor)} to Python-bound CUTLASS datatype and layout.") + +def get_tensor_shape(tensor, op="GEMM"): + if (numpy_available and isinstance(tensor, np.ndarray)) or ( + cupy_available and isinstance(tensor, cp.ndarray) + ): + return tensor.shape + elif torch_available and isinstance(tensor, torch.Tensor): + size = tensor.size() + if op == "CONV": + # PyTorch Tensors have shape NCHW + return (size[0], size[2], size[3], size[1]) + else: + return tuple(tensor.size()) + elif isinstance(tensor, float) or isinstance(tensor, int): + return (1,) + else: + raise Exception(f"Unable to convert tensor of type {type(tensor)} to Python-bound CUTLASS datatype and layout.") + + +_math_operation_value_map = {x.value: x for x in MathOperation} + + +def backend_math_operation(math_op: cutlass.MathOperation): + if math_op.value not in _math_operation_value_map.keys(): + raise Exception(f"Unable to convert math operation of type {math_op} to backend math operation.") + return _math_operation_value_map[math_op.value] + + +def construct_backend_td(td: cutlass.TileDescription, + kernel_schedule: cutlass.KernelScheduleType, + epilogue_schedule: cutlass.EpilogueScheduleType, + tile_scheduler: cutlass.TileSchedulerType) -> TileDescription: + mi = td.math_instruction + backend_mi = MathInstruction( + mi.instruction_shape, + mi.element_a, + mi.element_b, + mi.element_accumulator, + mi.opcode_class, + backend_math_operation(mi.math_operation) + ) + cluster_shape = td.cluster_shape if hasattr(td, "cluster_shape") else [1, 1, 1] + return TileDescription(td.threadblock_shape, td.stages, td.warp_count, + backend_mi, cluster_shape, kernel_schedule, epilogue_schedule, tile_scheduler) + + +def td_from_profiler_op(op) -> TileDescription: + """ + Converts the profiler's TileDescription in ``op`` into the backend TileDescription + + :param op: profiler Operation + + :returns: backend TileDescription + :rtype: cutlass.backend.TileDescription + """ + kschedule = op.kernel_schedule if hasattr(op, 'kernel_schedule') else None + eschedule = op.epilogue_schedule if hasattr(op, 'epilogue_schedule') else None + tschedule = op.tile_scheduler if hasattr(op, 'tile_scheduler') else None + return construct_backend_td(op.tile_description, kschedule, eschedule, tschedule) + + +def td_from_profiler_td(td: TileDescription) -> TileDescription: + """ + Converts the profiler's TileDescription into the backend TileDescription + + :param td: profiler TileDescription + :type td: cutlass.TileDescription + + :returns: backend TileDescription + :rtype: cutlass.backend.TileDescription + """ + return 
construct_backend_td(td, kernel_schedule=None, epilogue_schedule=None, tile_scheduler=None) + + +def to_camel_case(snake_str): + return "".join(x.capitalize() for x in snake_str.lower().split("_")) + + +def getattr_enum(obj, attr_name): + # The attr_name is under the snake_case + camel_attr = to_camel_case(attr_name) + if hasattr(obj, camel_attr): + return getattr(obj, camel_attr) + else: + raise Exception(f"Invalid option: {attr_name}") diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/__init__.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..dfc3154138653d063fe6f8d603b9b89b900707ac --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/__init__.py @@ -0,0 +1,49 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################################# + +import sys + +from . import conv2d_operation +from . import conv3d_operation +from . import gemm_operation + +if '-m' not in sys.argv: + # Do not import generator when running python -m cutlass_library.generator to + # avoid double-import warnings + from . import generator + +from . import library +from . import manifest +from . import rank_2k_operation +from . import rank_k_operation +from . import symm_operation +from . 
import trmm_operation diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d018d06272a0de876761013d06b79baed565081c Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/__pycache__/conv2d_operation.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/__pycache__/conv2d_operation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3fd27ac15f56c5c0b22d26df262d74839dea99ad Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/__pycache__/conv2d_operation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/__pycache__/conv3d_operation.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/__pycache__/conv3d_operation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..82253078a2f98f02ecc4f85e0cde185584f9af64 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/__pycache__/conv3d_operation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/__pycache__/gemm_operation.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/__pycache__/gemm_operation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..371ec5277cae403f45318e42f67dfad8f6e0031f Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/__pycache__/gemm_operation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/__pycache__/generator.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/__pycache__/generator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0abdca9c659c8dbde4d16b3c1bb56ab8efe590a4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/__pycache__/generator.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/__pycache__/library.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/__pycache__/library.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b5d07e51ee4bf87ad3f340f6b059710caf7683d Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/__pycache__/library.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/__pycache__/manifest.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/__pycache__/manifest.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..d158426d5fe233361c4992a81c064191425d6f96 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/__pycache__/manifest.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/__pycache__/rank_2k_operation.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/__pycache__/rank_2k_operation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..26f052ec3d3f56f954584c8223b5e86fa76503f3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/__pycache__/rank_2k_operation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/__pycache__/rank_k_operation.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/__pycache__/rank_k_operation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d2a960c15b3c847bbb84b6b23228e35b71af680 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/__pycache__/rank_k_operation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/__pycache__/symm_operation.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/__pycache__/symm_operation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6865091484ec780fc6d008a8804355cddfb61945 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/__pycache__/symm_operation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/__pycache__/trmm_operation.cpython-310.pyc b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/__pycache__/trmm_operation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff7f140a0811701d293acc0cb1c8632c84f6cfb1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/__pycache__/trmm_operation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/conv2d_operation.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/conv2d_operation.py new file mode 100644 index 0000000000000000000000000000000000000000..b59771ef2e53602995b83c3297a78b81611d2b6a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/conv2d_operation.py @@ -0,0 +1,492 @@ +################################################################################################# +# +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. 
Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################################# + +""" +Utilities for emitting Conv2d kernels +""" + +import enum +import os.path +import shutil + +from cutlass_library.library import * + +################################################################################################### + +# +class Conv2dOperation: + # + def __init__(self, conv_kind, iterator_algorithm, arch, tile_description, A, B, C, element_epilogue, \ + stride_support, epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity1, \ + group_mode = GroupMode.NoneGroup): + + self.operation_kind = OperationKind.Conv2d + self.arch = arch + self.tile_description = tile_description + self.conv_kind = conv_kind + self.A = A + self.B = B + self.C = C + self.element_epilogue = element_epilogue + self.epilogue_functor = epilogue_functor + self.iterator_algorithm = iterator_algorithm + self.stride_support = stride_support + self.swizzling_functor = swizzling_functor + self.group_mode = group_mode + # + def is_complex(self): + complex_operators = [ + MathOperation.multiply_add_complex, + MathOperation.multiply_add_complex_gaussian + ] + return self.tile_description.math_instruction.math_operation in complex_operators + + # + def accumulator_type(self): + accum = self.tile_description.math_instruction.element_accumulator + + if self.is_complex(): + return get_complex_from_real(accum) + + return accum + + # + def core_name(self): + ''' The basic operation kind is prefixed with a letter indicating the accumulation type. ''' + + intermediate_type = '' + + if self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp: + inst_shape = "%d%d%d" % tuple(self.tile_description.math_instruction.instruction_shape) + if self.tile_description.math_instruction.element_a != self.A.element and \ + self.tile_description.math_instruction.element_a != self.accumulator_type(): + intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a] + else: + inst_shape = '' + + return "%s%s%s%s_%s" % (ShortDataTypeNames[self.accumulator_type()], \ + inst_shape, intermediate_type, ConvKindNames[self.conv_kind], IteratorAlgorithmNames[self.iterator_algorithm]) + + # + def extended_name(self): + ''' Append data types if they differ from compute type. 
''' + if self.C.element != self.tile_description.math_instruction.element_accumulator and \ + self.A.element != self.tile_description.math_instruction.element_accumulator: + extended_name = "${element_c}_${core_name}_${element_a}" + elif self.C.element == self.tile_description.math_instruction.element_accumulator and \ + self.A.element != self.tile_description.math_instruction.element_accumulator: + extended_name = "${core_name}_${element_a}" + else: + extended_name = "${core_name}" + + extended_name = SubstituteTemplate(extended_name, { + 'element_a': DataTypeNames[self.A.element], + 'element_c': DataTypeNames[self.C.element], + 'core_name': self.core_name() + }) + + return extended_name + + # + def layout_name(self): + return "%s" % (ShortLayoutTypeNames[self.A.layout]) + + # + def configuration_name(self): + ''' The full procedural name indicates architecture, extended name, tile size, and layout. ''' + + opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class] + + threadblock = self.tile_description.procedural_name() + + # grouped conv + if self.group_mode != GroupMode.NoneGroup: + group_conv_name = f"{GroupModeNames[self.group_mode]}_" + else: + group_conv_name = "" + + if self.stride_support == StrideSupport.Unity: + configuration_name = "cutlass_${opcode_class}_${extended_name}_${threadblock}_${layout}_unity_stride_${group_conv_name}align${alignment}" + else: + configuration_name = "cutlass_${opcode_class}_${extended_name}_${threadblock}_${layout}_${group_conv_name}align${alignment}" + + return SubstituteTemplate( + configuration_name, + { + 'opcode_class': opcode_class_name, + 'extended_name': self.extended_name(), + 'threadblock': threadblock, + 'layout': self.layout_name(), + 'alignment': "%d" % self.A.alignment, + 'group_conv_name': group_conv_name + } + ) + + # + def procedural_name(self): + ''' The full procedural name indicates architecture, extended name, tile size, and layout. 
''' + return self.configuration_name() + +################################################################################################### +# +# Emits single instances of a CUTLASS device-wide operator +# +################################################################################################### + +class EmitConv2dInstance: + def __init__(self): + self.template = """ + // Conv2d${conv_kind_name} ${iterator_algorithm_name} kernel instance "${operation_name}" + using ${operation_name}_base = + typename cutlass::conv::kernel::DefaultConv2d${conv_kind_name}< + ${element_a}, + ${layout_a}, + ${element_b}, + ${layout_b}, + ${element_c}, + ${layout_c}, + ${element_accumulator}, + ${opcode_class}, + ${arch}, + cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, + cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k} >, + cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, + ${epilogue_functor}< + ${element_c}, + ${epilogue_vector_length}, + ${element_accumulator}, + ${element_epilogue} + >, + ${swizzling_functor}, // cutlass::gemm::threadblock::GemmSplitKIdentityThreadblockSwizzle<>, + ${stages}, + ${math_operator}, + ${iterator_algorithm}, + ${stride_support}, + ${align_a}, + ${align_b} + >::Kernel; +""" + self.template_group_conv = """ + // Conv2d${conv_kind_name} ${iterator_algorithm_name} kernel instance "${operation_name}" + using ${operation_name}_base = + typename cutlass::conv::kernel::DefaultConv2dGroup${conv_kind_name}< + ${element_a}, + ${layout_a}, + ${element_b}, + ${layout_b}, + ${element_c}, + ${layout_c}, + ${element_accumulator}, + ${opcode_class}, + ${arch}, + cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, + cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k} >, + cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, + ${epilogue_functor}< + ${element_c}, + ${epilogue_vector_length}, + ${element_accumulator}, + ${element_epilogue} + >, + ${swizzling_functor}, // cutlass::gemm::threadblock::GemmSplitKIdentityThreadblockSwizzle<>, + ${stages}, + ${math_operator}, + ${group_mode}, + ${iterator_algorithm}, + ${stride_support}, + ${align_a}, + ${align_b} + >::Kernel; +""" + self.template_depthwise_direct_conv = """ + // Conv2d${conv_kind_name} ${iterator_algorithm_name} kernel instance "${operation_name}" + using ${operation_name}_base = + typename cutlass::conv::kernel::DefaultDepthwiseDirect2dConv${conv_kind_name}< + ${element_a}, + ${layout_a}, + ${element_b}, + ${layout_b}, + ${element_c}, + ${layout_c}, + ${element_accumulator}, + ${opcode_class}, + ${arch}, + cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, + cutlass::conv::TensorNHWCShape<${threadblock_output_shape_n}, ${threadblock_output_shape_p}, ${threadblock_output_shape_q}, ${groups_per_cta}>, + cutlass::MatrixShape<${filter_shape_r}, ${filter_shape_s}>, + cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>, + cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, + ${epilogue_functor}< + ${element_c}, + ${epilogue_vector_length}, + ${element_accumulator}, + ${element_epilogue}, + cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling + >, + + cutlass::conv::threadblock::DepthwiseDirect2dConvIdentityThreadblockSwizzle< + 1, + ${threadblock_output_shape_n}, + 
${threadblock_output_shape_p}, + ${threadblock_output_shape_q}>, + ${stages}, + ${math_operator}, + ${iterator_algorithm}, + ${stride_support}, + cutlass::MatrixShape<${stride_r}, ${stride_s}>, + cutlass::MatrixShape<${dilation_r}, ${dilation_s}> + >::Kernel; +""" + + def emit(self, operation): + + warp_shape = [int(operation.tile_description.threadblock_shape[idx] / operation.tile_description.warp_count[idx]) for idx in range(3)] + + epilogue_vector_length = int(min(operation.C.alignment * DataTypeSize[operation.C.element], 128) / DataTypeSize[operation.C.element]) + + values = { + 'operation_name': operation.procedural_name(), + 'conv_kind': ConvKindTag[operation.conv_kind], + 'conv_kind_name': ConvKindNames[operation.conv_kind].capitalize(), + 'element_a': DataTypeTag[operation.A.element], + 'layout_a': LayoutTag[operation.A.layout], + 'element_b': DataTypeTag[operation.B.element], + 'layout_b': LayoutTag[operation.B.layout], + 'element_c': DataTypeTag[operation.C.element], + 'layout_c': LayoutTag[operation.C.layout], + 'element_accumulator': DataTypeTag[operation.accumulator_type()], + 'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class], + 'arch': "cutlass::arch::Sm%d" % operation.arch, + 'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]), + 'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]), + 'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]), + 'warp_shape_m': str(warp_shape[0]), + 'warp_shape_n': str(warp_shape[1]), + 'warp_shape_k': str(warp_shape[2]), + 'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]), + 'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]), + 'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]), + 'epilogue_vector_length': str(epilogue_vector_length), + 'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor], + 'element_epilogue': str(DataTypeTag[operation.element_epilogue]), + 'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor], + 'stages': str(operation.tile_description.stages), + 'iterator_algorithm': IteratorAlgorithmTag[operation.iterator_algorithm], + 'iterator_algorithm_name': IteratorAlgorithmNames[operation.iterator_algorithm].capitalize(), + 'stride_support': StrideSupportTag[operation.stride_support], + 'math_operator': 'cutlass::arch::OpMultiplyAddComplex' if operation.is_complex() else \ + MathOperationTag[operation.tile_description.math_instruction.math_operation], + 'align_a': str(operation.A.alignment), + 'align_b': str(operation.B.alignment), + } + + if operation.group_mode == GroupMode.NoneGroup: + return SubstituteTemplate(self.template, values) + + elif operation.group_mode == GroupMode.Depthwise: + values['group_mode'] = GroupModeTag[operation.group_mode] + # Setup other template params + values['threadblock_output_shape_n'] = str(operation.tile_description.threadblock_output_shape[0]) + values['threadblock_output_shape_p'] = str(operation.tile_description.threadblock_output_shape[1]) + values['threadblock_output_shape_q'] = str(operation.tile_description.threadblock_output_shape[2]) + + values['groups_per_cta'] = str(operation.tile_description.threadblock_output_shape[3]) + + values['filter_shape_r'] = str(operation.tile_description.filter_shape[0]) + values['filter_shape_s'] = str(operation.tile_description.filter_shape[1]) + + values['stride_r'] = 
str(operation.tile_description.stride[0]) + values['stride_s'] = str(operation.tile_description.stride[1]) + + values['dilation_r'] = str(operation.tile_description.dilation[0]) + values['dilation_s'] = str(operation.tile_description.dilation[1]) + + return SubstituteTemplate(self.template_depthwise_direct_conv, values) + + else: + values['group_mode'] = GroupModeTag[operation.group_mode] + return SubstituteTemplate(self.template_group_conv, values) + +################################################################################################### +# +# Generator functions for all layouts +# +################################################################################################### + +# +def GenerateConv2dTensorOp(manifest, tile_descriptions, min_cc, align = 128): + + for tile in tile_descriptions: + for conv_kind in [ConvKind.Fprop, ConvKind.Dgrad, ConvKind.Wgrad]: + + if conv_kind == ConvKind.Fprop or (tile.math_instruction.element_accumulator in [DataType.f16, DataType.f32]): + + # + output_types = [tile.math_instruction.element_a, tile.math_instruction.element_accumulator] \ + if DataTypeSize[tile.math_instruction.element_accumulator] == 32 \ + else [tile.math_instruction.element_accumulator,] + + for output_type in output_types: + A = TensorDescription(tile.math_instruction.element_a, LayoutType.TensorNHWC, int(align / DataTypeSize[tile.math_instruction.element_a])) + B = TensorDescription(tile.math_instruction.element_b, LayoutType.TensorNHWC, int(align / DataTypeSize[tile.math_instruction.element_b])) + C = TensorDescription(output_type, LayoutType.TensorNHWC, max(1, int(align / DataTypeSize[output_type]))) + + manifest.append(Conv2dOperation(conv_kind, min_cc, tile, A, B, C, tile.math_instruction.element_accumulator)) + +################################################################################################### +# +# Emitters functions for all targets +# +################################################################################################### + +class EmitConv2dConfigurationLibrary: + def __init__(self, operation_path, configuration_name): + self.configuration_name = configuration_name + self.configuration_path = os.path.join(operation_path, "%s.cu" % configuration_name) + + self.instance_emitter = EmitConv2dInstance() + + self.instance_template = """ +${operation_instance} + +// Derived class +struct ${operation_name} : + public ${operation_name}_base { }; + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +""" + self.header_template = """ +/* + Generated by conv2d_operation.py - Do not edit. 
+*/ + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +#include "cutlass/cutlass.h" +#include "cutlass/library/library.h" +#include "cutlass/library/manifest.h" + +#include "library_internal.h" +#include "conv2d_operation.h" + +/////////////////////////////////////////////////////////////////////////////////////////////////// +""" + + self.configuration_header = """ + +namespace cutlass { +namespace library { + +// Initialize all instances +void initialize_${configuration_name}(Manifest &manifest) { + +""" + + self.configuration_instance = """ + using Operation_${operation_name} = cutlass::conv::device::ImplicitGemmConvolution< + ${operation_name}>; + + manifest.append(new cutlass::library::Conv2dOperation< + Operation_${operation_name}>( + "${operation_name}")); + +""" + + self.configuration_direct_conv_instance = """ + using Operation_${operation_name} = cutlass::conv::device::DirectConvolution< + ${operation_name}>; + + manifest.append(new cutlass::library::DirectConv2dOperation< + Operation_${operation_name}>( + "${operation_name}")); + +""" + + self.configuration_epilogue = """ +} +""" + self.epilogue_template = """ + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +""" + + # + def __enter__(self): + self.configuration_file = open(self.configuration_path, "w") + self.configuration_file.write(SubstituteTemplate(self.header_template, { + 'configuration_name': self.configuration_name + })) + self.operations = [] + return self + + # + def emit(self, operation): + self.operations.append(operation) + self.configuration_file.write(SubstituteTemplate(self.instance_template, { + 'configuration_name': self.configuration_name, + 'operation_name': operation.procedural_name(), + 'operation_instance': self.instance_emitter.emit(operation) + })) + + # + def __exit__(self, exception_type, exception_value, traceback): + + self.configuration_file.write(SubstituteTemplate(self.configuration_header, { + 'configuration_name': self.configuration_name + })) + + for operation in self.operations: + if operation.group_mode == GroupMode.Depthwise: + self.configuration_file.write(SubstituteTemplate(self.configuration_direct_conv_instance, { + 'configuration_name': self.configuration_name, + 'operation_name': operation.procedural_name() + })) + else: + self.configuration_file.write(SubstituteTemplate(self.configuration_instance, { + 'configuration_name': self.configuration_name, + 'operation_name': operation.procedural_name() + })) + + self.configuration_file.write(self.configuration_epilogue) + self.configuration_file.write(self.epilogue_template) + self.configuration_file.close() + + +################################################################################################### +################################################################################################### diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/conv3d_operation.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/conv3d_operation.py new file mode 100644 index 0000000000000000000000000000000000000000..0a3265bb8c8d4617cb9650cde965078df19d628e --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/conv3d_operation.py @@ -0,0 +1,350 @@ 
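Editorial sketch (not part of the vendored diff), placed between the Conv2d and Conv3d emitters: how a generator script would typically drive EmitConv2dConfigurationLibrary above. The class is written to be used as a context manager; __enter__ writes the generated-file header, each emit() appends one kernel instance, and __exit__ writes the initialize_<configuration_name>(Manifest&) registration body. The names output_dir, config_name, and operations below are hypothetical placeholders for a list of fully populated Conv2dOperation instances and an output location.

import os
from cutlass_library.conv2d_operation import EmitConv2dConfigurationLibrary

def emit_conv2d_configuration(output_dir, config_name, operations):
    # Ensure the destination exists; the emitter opens "<config_name>.cu" for writing on enter.
    os.makedirs(output_dir, exist_ok=True)
    with EmitConv2dConfigurationLibrary(output_dir, config_name) as emitter:
        for op in operations:
            emitter.emit(op)
    return os.path.join(output_dir, f"{config_name}.cu")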
+################################################################################################# +# +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################################# + +""" +Utilities for emitting Conv3d kernels +""" + +import enum +import os.path +import shutil + +from cutlass_library.library import * + +################################################################################################### + +# +class Conv3dOperation: + # + def __init__(self, conv_kind, iterator_algorithm, arch, tile_description, A, B, C, element_epilogue, \ + stride_support, epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity4): + + self.operation_kind = OperationKind.Conv3d + self.arch = arch + self.tile_description = tile_description + self.conv_kind = conv_kind + self.A = A + self.B = B + self.C = C + self.element_epilogue = element_epilogue + self.epilogue_functor = epilogue_functor + self.iterator_algorithm = iterator_algorithm + self.stride_support = stride_support + self.swizzling_functor = swizzling_functor + + # + def core_name(self): + ''' The basic operation kind is prefixed with a letter indicating the accumulation type. 
''' + + intermediate_type = '' + + if self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp: + inst_shape = "%d%d%d" % tuple(self.tile_description.math_instruction.instruction_shape) + if self.tile_description.math_instruction.element_a != self.A.element and \ + self.tile_description.math_instruction.element_a != self.tile_description.math_instruction.element_accumulator: + intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a] + else: + inst_shape = '' + + return "%s%s%s%s3d_%s" % (ShortDataTypeNames[self.tile_description.math_instruction.element_accumulator], \ + inst_shape, intermediate_type, ConvKindNames[self.conv_kind], IteratorAlgorithmNames[self.iterator_algorithm]) + + # + def extended_name(self): + ''' Append data types if they differ from compute type. ''' + if self.C.element != self.tile_description.math_instruction.element_accumulator and \ + self.A.element != self.tile_description.math_instruction.element_accumulator: + extended_name = "${element_c}_${core_name}_${element_a}" + elif self.C.element == self.tile_description.math_instruction.element_accumulator and \ + self.A.element != self.tile_description.math_instruction.element_accumulator: + extended_name = "${core_name}_${element_a}" + else: + extended_name = "${core_name}" + + extended_name = SubstituteTemplate(extended_name, { + 'element_a': DataTypeNames[self.A.element], + 'element_c': DataTypeNames[self.C.element], + 'core_name': self.core_name() + }) + + return extended_name + + # + def configuration_name(self): + ''' The full procedural name indicates architecture, extended name, tile size, and layout. ''' + + opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class] + + threadblock = "%dx%d_%dx%d" % ( + self.tile_description.threadblock_shape[0], + self.tile_description.threadblock_shape[1], + self.tile_description.threadblock_shape[2], + self.tile_description.stages + ) + + if self.stride_support == StrideSupport.Unity: + configuration_name = "cutlass_${opcode_class}_${extended_name}_${threadblock}_unity_stride" + else: + configuration_name = "cutlass_${opcode_class}_${extended_name}_${threadblock}" + + return SubstituteTemplate( + configuration_name, + { + 'opcode_class': opcode_class_name, + 'extended_name': self.extended_name(), + 'threadblock': threadblock, + } + ) + + # + def procedural_name(self): + ''' The full procedural name indicates architecture, extended name, tile size, and layout. 
''' + return self.configuration_name() + +################################################################################################### +# +# Emits single instances of a CUTLASS device-wide operator +# +################################################################################################### + +class EmitConv3dInstance: + def __init__(self): + self.template = """ + // Conv3d${conv_kind_name} ${iterator_algorithm_name} kernel instance "${operation_name}" + using ${operation_name}_base = + typename cutlass::conv::kernel::DefaultConv3d${conv_kind_name}< + ${element_a}, + cutlass::layout::TensorNDHWC, + ${element_b}, + cutlass::layout::TensorNDHWC, + ${element_c}, + cutlass::layout::TensorNDHWC, + ${element_accumulator}, + ${opcode_class}, + ${arch}, + cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, + cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k} >, + cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, + ${epilogue_functor}< + ${element_c}, + ${epilogue_vector_length}, + ${element_accumulator}, + ${element_epilogue} + >, + ${swizzling_functor}, // cutlass::gemm::threadblock::GemmSplitKIdentityThreadblockSwizzle<>, + ${stages}, + cutlass::arch::OpMultiplyAdd, + ${iterator_algorithm}, + ${stride_support} + >::Kernel; +""" + + + def emit(self, operation): + + warp_shape = [int(operation.tile_description.threadblock_shape[idx] / operation.tile_description.warp_count[idx]) for idx in range(3)] + + epilogue_vector_length = int(min(operation.C.alignment * DataTypeSize[operation.C.element], 128) / DataTypeSize[operation.C.element]) + + values = { + 'operation_name': operation.procedural_name(), + 'conv_kind': ConvKindTag[operation.conv_kind], + 'conv_kind_name': ConvKindNames[operation.conv_kind].capitalize(), + 'element_a': DataTypeTag[operation.A.element], + 'layout_a': LayoutTag[operation.A.layout], + 'element_b': DataTypeTag[operation.B.element], + 'layout_b': LayoutTag[operation.B.layout], + 'element_c': DataTypeTag[operation.C.element], + 'layout_c': LayoutTag[operation.C.layout], + 'element_accumulator': DataTypeTag[operation.tile_description.math_instruction.element_accumulator], + 'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class], + 'arch': "cutlass::arch::Sm%d" % operation.arch, + 'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]), + 'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]), + 'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]), + 'warp_shape_m': str(warp_shape[0]), + 'warp_shape_n': str(warp_shape[1]), + 'warp_shape_k': str(warp_shape[2]), + 'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]), + 'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]), + 'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]), + 'epilogue_vector_length': str(epilogue_vector_length), + 'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor], + 'element_epilogue': str(DataTypeTag[operation.element_epilogue]), + 'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor], + 'stages': str(operation.tile_description.stages), + 'iterator_algorithm': IteratorAlgorithmTag[operation.iterator_algorithm], + 'iterator_algorithm_name': IteratorAlgorithmNames[operation.iterator_algorithm].capitalize(), + 'stride_support': 
StrideSupportTag[operation.stride_support] + } + + return SubstituteTemplate(self.template, values) + +################################################################################################### +# +# Generator functions for all layouts +# +################################################################################################### + +# +def GenerateConv3dTensorOp(manifest, tile_descriptions, min_cc, align = 128): + + for tile in tile_descriptions: + for conv_kind in [ConvKind.Fprop, ConvKind.Dgrad, ConvKind.Wgrad]: + + if conv_kind == ConvKind.Fprop or (tile.math_instruction.element_accumulator in [DataType.f16, DataType.f32]): + + # + output_types = [tile.math_instruction.element_a, tile.math_instruction.element_accumulator] \ + if DataTypeSize[tile.math_instruction.element_accumulator] == 32 \ + else [tile.math_instruction.element_accumulator,] + + for output_type in output_types: + A = TensorDescription(tile.math_instruction.element_a, LayoutType.TensorNDHWC, int(align / DataTypeSize[tile.math_instruction.element_a])) + B = TensorDescription(tile.math_instruction.element_b, LayoutType.TensorNDHWC, int(align / DataTypeSize[tile.math_instruction.element_b])) + C = TensorDescription(output_type, LayoutType.TensorNDHWC, max(1, int(align / DataTypeSize[output_type]))) + + manifest.append(Conv3dOperation(conv_kind, min_cc, tile, A, B, C, tile.math_instruction.element_accumulator)) + +################################################################################################### +# +# Emitters functions for all targets +# +################################################################################################### + +class EmitConv3dConfigurationLibrary: + def __init__(self, operation_path, configuration_name): + self.configuration_name = configuration_name + self.configuration_path = os.path.join(operation_path, "%s.cu" % configuration_name) + + self.instance_emitter = EmitConv3dInstance() + + self.instance_template = """ +${operation_instance} + +// Derived class +struct ${operation_name} : + public ${operation_name}_base { }; + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +""" + self.header_template = """ +/* + Generated by conv3d_operation.py - Do not edit. 
+*/ + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +#include "cutlass/cutlass.h" +#include "cutlass/library/library.h" +#include "cutlass/library/manifest.h" + +#include "library_internal.h" +#include "conv3d_operation.h" + +/////////////////////////////////////////////////////////////////////////////////////////////////// +""" + + self.configuration_header = """ + +namespace cutlass { +namespace library { + +// Initialize all instances +void initialize_${configuration_name}(Manifest &manifest) { + +""" + + self.configuration_instance = """ + using Operation_${operation_name} = cutlass::conv::device::ImplicitGemmConvolution< + ${operation_name}>; + + manifest.append(new cutlass::library::Conv3dOperation< + Operation_${operation_name}>( + "${operation_name}")); + +""" + + self.configuration_epilogue = """ +} +""" + self.epilogue_template = """ + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +""" + + # + def __enter__(self): + self.configuration_file = open(self.configuration_path, "w") + self.configuration_file.write(SubstituteTemplate(self.header_template, { + 'configuration_name': self.configuration_name + })) + self.operations = [] + return self + + # + def emit(self, operation): + self.operations.append(operation) + self.configuration_file.write(SubstituteTemplate(self.instance_template, { + 'configuration_name': self.configuration_name, + 'operation_name': operation.procedural_name(), + 'operation_instance': self.instance_emitter.emit(operation) + })) + + # + def __exit__(self, exception_type, exception_value, traceback): + + self.configuration_file.write(SubstituteTemplate(self.configuration_header, { + 'configuration_name': self.configuration_name + })) + + for operation in self.operations: + self.configuration_file.write(SubstituteTemplate(self.configuration_instance, { + 'configuration_name': self.configuration_name, + 'operation_name': operation.procedural_name() + })) + + self.configuration_file.write(self.configuration_epilogue) + self.configuration_file.write(self.epilogue_template) + self.configuration_file.close() + + +################################################################################################### +################################################################################################### + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/gemm_operation.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/gemm_operation.py new file mode 100644 index 0000000000000000000000000000000000000000..e92b891f3c70f01f0371324543a8b472abe49976 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/gemm_operation.py @@ -0,0 +1,1237 @@ +################################################################################################# +# +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. 
Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################################# + +""" +Utilities for emitting GEMM kernels +""" + +import enum +import os.path +import shutil +import functools +import operator +import collections + +from cutlass_library.library import * + +################################################################################################### +# +# Data structure modeling a GEMM operation +# +################################################################################################### + +# +class GemmOperation: + # + def __init__(self, gemm_kind, arch, tile_description, A, B, C, element_epilogue, \ + epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity8, D = None, + kernel_schedule = KernelScheduleType.ScheduleAuto, epilogue_schedule = EpilogueScheduleType.ScheduleAuto, + tile_scheduler = TileSchedulerType.Default): + + self.prefix = "3x" if gemm_kind == GemmKind.Universal3x else "" + self.operation_kind = OperationKind.Gemm + self.arch = arch + self.tile_description = tile_description + self.gemm_kind = gemm_kind + self.A = A + self.B = B + self.C = C + self.D = D + if self.D == None: + self.D = self.C + + if gemm_kind != GemmKind.Universal3x: + assert(kernel_schedule == KernelScheduleType.ScheduleAuto) + assert(epilogue_schedule == EpilogueScheduleType.ScheduleAuto) + self.kernel_schedule = kernel_schedule + self.epilogue_schedule = epilogue_schedule + self.element_epilogue = element_epilogue + self.epilogue_functor = epilogue_functor + self.swizzling_functor = swizzling_functor + self.tile_scheduler = tile_scheduler + + # + def is_complex(self): + complex_operators = [ + MathOperation.multiply_add_complex, + MathOperation.multiply_add_complex_gaussian, + MathOperation.multiply_add_complex_fast_f32 + ] + return self.tile_description.math_instruction.math_operation in complex_operators + + # + def is_planar_complex(self): + return self.gemm_kind in (GemmKind.PlanarComplex, GemmKind.PlanarComplexArray) + + # + def accumulator_type(self): + accum = self.tile_description.math_instruction.element_accumulator + + if self.is_complex(): + return get_complex_from_real(accum) + + return accum + + # + def short_math_name(self): + if self.tile_description.math_instruction.math_operation == 
MathOperation.multiply_add_complex_gaussian: + return "g%s" % ShortDataTypeNames[self.accumulator_type()] + return ShortDataTypeNames[self.accumulator_type()] + + + # + def core_name(self): + ''' The basic operation kind is prefixed with a letter indicating the accumulation type. ''' + + inst_shape = '' + inst_operation = '' + intermediate_type = '' + + math_operations_map = { + MathOperation.xor_popc: 'xor', + MathOperation.and_popc: 'and' + } + + if self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp or \ + self.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp: + + math_op = self.tile_description.math_instruction.math_operation + math_op_string = math_operations_map[math_op] if math_op in math_operations_map.keys() else '' + + if self.gemm_kind == GemmKind.Universal3x: + inst_shape = "{0}x{1}x{2}".format(*tuple(self.tile_description.math_instruction.instruction_shape)) + else: + inst_shape = "{0}{1}{2}".format(*tuple(self.tile_description.math_instruction.instruction_shape)) + + inst_shape += math_op_string + + if self.tile_description.math_instruction.element_a != self.A.element and \ + self.tile_description.math_instruction.element_a != self.tile_description.math_instruction.element_accumulator: + intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a] + + return "%s%s%s%s" % (self.short_math_name(), inst_shape, intermediate_type, GemmKindNames[self.gemm_kind]) + + # Generates a string representing the MMA instruction. + def extended_name(self): + ''' Append data types if they differ from compute type. ''' + if self.is_complex(): + extended_name = "${core_name}" + else: + if self.C.element != self.tile_description.math_instruction.element_accumulator and \ + self.A.element != self.tile_description.math_instruction.element_accumulator: + extended_name = "${element_c}_${core_name}_${element_a}" + elif self.C.element == self.tile_description.math_instruction.element_accumulator and \ + self.A.element != self.tile_description.math_instruction.element_accumulator: + extended_name = "${core_name}_${element_a}" + else: + extended_name = "${core_name}" + + extended_name = SubstituteTemplate(extended_name, { + 'element_a': DataTypeNames[self.A.element], + 'element_c': DataTypeNames[self.C.element], + 'core_name': self.core_name() + }) + + return extended_name + + def extended_name_3x(self): + '''Generates a string representing the MMA atom. Assumes accumulator type is C type.''' + extended_name = "{core_name}_{element_a}_{element_b}_{element_acc}_{element_c}_{element_d}".format( + element_a = DataTypeNames[self.A.element], + element_b = DataTypeNames[self.B.element], + element_acc = DataTypeNames[self.tile_description.math_instruction.element_accumulator], + element_c = DataTypeNames[self.C.element], + element_d = DataTypeNames[self.D.element], + core_name = self.core_name()) + return extended_name + + # Generates a short string representing the AB layout tags (e.g. nt or tn) + def layout_name(self): + if self.is_complex() or self.is_planar_complex(): + return "%s%s" % ( + ShortComplexLayoutNames[(self.A.layout, self.A.complex_transform)], + ShortComplexLayoutNames[(self.B.layout, self.B.complex_transform)] + ) + return "%s%s" % (ShortLayoutTypeNames[self.A.layout], ShortLayoutTypeNames[self.B.layout]) + + # Generates a short string representing the ABC layout tags (e.g. 
ntn or tnn) + def layout_name_3x(self): + if self.is_complex() or self.is_planar_complex(): + return "{}{}{}".format( + ShortComplexLayoutNames[(self.A.layout, self.A.complex_transform)], + ShortComplexLayoutNames[(self.B.layout, self.B.complex_transform)], + ShortComplexLayoutNames[(self.C.layout, self.C.complex_transform)]) + else: + return "{}{}{}".format( + ShortLayoutTypeNames[self.A.layout], + ShortLayoutTypeNames[self.B.layout], + ShortLayoutTypeNames[self.C.layout]) + + # Generates a short string representing underlying kernel schedule type + def kernel_schedule_name_3x(self): + return KernelScheduleSuffixes[self.kernel_schedule] + + # Generates a short string representing underlying epilogue schedule type + def epilogue_schedule_name_3x(self): + return EpilogueScheduleSuffixes[self.epilogue_schedule] + + # Generates the full kernel function name + def procedural_name(self): + ''' The full procedural name indicates architecture, extended name, tile size, and layout. ''' + opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class] + if self.arch >= 90: + kernel_name_template = "cutlass{p}_sm{ar}_{op}_{ex}_{tbm}x{tbn}x{tbk}_{cm}x{cn}x{ck}_{l}_{s}_align{al}{t}{k}{e}" + return kernel_name_template.format( + p = self.prefix, + ar = self.arch, + op = opcode_class_name, + ex = self.extended_name_3x(), + tbm = self.tile_description.tile_shape[0], + tbn = self.tile_description.tile_shape[1], + tbk = self.tile_description.tile_shape[2], + cm = self.tile_description.cluster_shape[0], + cn = self.tile_description.cluster_shape[1], + ck = self.tile_description.cluster_shape[2], + l = self.tile_description.stages, + s = self.layout_name_3x(), + al = str(max(self.A.alignment, self.B.alignment)), + t = TileSchedulerSuffixes[self.tile_scheduler], + k = self.kernel_schedule_name_3x(), + e = self.epilogue_schedule_name_3x()) + else: + threadblock = self.tile_description.procedural_name() + return "cutlass{p}_{op}_{ex}_{tb}_{l}_align{a}".format( + p = self.prefix, + op = opcode_class_name, + ex = self.extended_name(), + tb = threadblock, + l = self.layout_name(), + a = str(self.A.alignment)) + + # + def configuration_name(self): + ''' The full procedural name indicates architecture, extended name, tile size, and layout. ''' + return self.procedural_name() + + def __hash__(self): + return hash(self.configuration_name()) + + def __eq__(self, other): + return self.configuration_name() == other.configuration_name() + +################################################################################################### +# +# Data structure modeling a grouped GEMM operation +# +################################################################################################### + +# +class GroupedGemmOperation(GemmOperation): + # + def __init__(self, gemm_kind, arch, tile_description, A, B, C, element_epilogue, \ + epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity8, \ + scheduler_mode = GroupScheduleMode.Device): + super().__init__(gemm_kind, arch, tile_description, A, B, C, element_epilogue, \ + epilogue_functor, swizzling_functor) + + self.scheduler_mode = scheduler_mode + + # + def procedural_name(self): + ''' The full procedural name indicates architecture, extended name, tile size, and layout. 
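# Editor's aside (illustrative sketch, not part of the original file): how the SM90
# kernel-name template in procedural_name() above expands. All field values below are
# hypothetical and only demonstrate the format; they do not name a kernel that is
# guaranteed to exist in the generated library.
_sm90_name_template = "cutlass{p}_sm{ar}_{op}_{ex}_{tbm}x{tbn}x{tbk}_{cm}x{cn}x{ck}_{l}_{s}_align{al}{t}{k}{e}"
_example_fields = dict(
  p = "3x", ar = 90, op = "tensorop",
  ex = "s64x128x16gemm_f16_f16_f32_f32_f32",  # extended_name_3x(): core name + A/B/acc/C/D type names
  tbm = 128, tbn = 128, tbk = 64,             # threadblock tile shape
  cm = 1, cn = 2, ck = 1,                     # cluster shape
  l = 0,                                      # stage count (0 lets the builder choose)
  s = "tnn",                                  # layout_name_3x(): A/B/C layout tags
  al = 8, t = "", k = "_warpspecialized", e = "_epi_tma")
# -> cutlass3x_sm90_tensorop_s64x128x16gemm_f16_f16_f32_f32_f32_128x128x64_1x2x1_0_tnn_align8_warpspecialized_epi_tma
print(_sm90_name_template.format(**_example_fields))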
''' + base = super().procedural_name() + return SubstituteTemplate( + base + "_schedule${schedule}", + { + 'schedule': ShortGroupScheduleModeNames[self.scheduler_mode] + }) + + +################################################################################################### +# +# Emits single instances of a CUTLASS device-wide operator +# +################################################################################################### + +# +class EmitGemmInstance: + ''' Responsible for emitting a CUTLASS template definition''' + + def __init__(self, operation_suffix = ''): + self.operation_suffix = operation_suffix + self.includes = [] + self.gemm_template = """ + // Gemm operator ${operation_name} + using Operation_${operation_name} = cutlass::gemm::device::Gemm< + ${element_a}, ${layout_a}, + ${element_b}, ${layout_b}, + ${element_c}, ${layout_c}, + ${element_accumulator}, + ${opcode_class}, + ${arch}, + cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, + cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>, + cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, + ${epilogue_functor}< + ${element_c}, + ${epilogue_vector_length}, + ${element_accumulator}, + ${element_epilogue} + >, + ${swizzling_functor}, + ${stages}, + ${align_a}, + ${align_b}, + false, + ${math_operation} + ${residual} + >; +""" + self.gemm_complex_template = """ + // Gemm operator ${operation_name} + using Operation_${operation_name} = cutlass::gemm::device::GemmComplex< + ${element_a}, ${layout_a}, + ${element_b}, ${layout_b}, + ${element_c}, ${layout_c}, + ${element_accumulator}, + ${opcode_class}, + ${arch}, + cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, + cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>, + cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, + ${epilogue_functor}< + ${element_c}, + ${epilogue_vector_length}, + ${element_accumulator}, + ${element_epilogue} + >, + ${swizzling_functor}, + ${stages}, + ${transform_a}, + ${transform_b}, + ${math_operation} + ${residual} + >; +""" + + # + def instance_template(self): + return """ +${compile_guard_start} + manifest.append(new ${gemm_kind}("${operation_name}")); +${compile_guard_end} +""" + + # + def emit(self, operation): + + warp_shape = [operation.tile_description.threadblock_shape[idx] // operation.tile_description.warp_count[idx] for idx in range(3)] + + epilogue_vector_length = int(min(operation.C.alignment * DataTypeSize[operation.C.element], 128) / DataTypeSize[operation.C.element]) + + residual = '' + + values = { + 'operation_name': operation.procedural_name(), + 'element_a': DataTypeTag[operation.A.element], + 'layout_a': LayoutTag[operation.A.layout], + 'element_b': DataTypeTag[operation.B.element], + 'layout_b': LayoutTag[operation.B.layout], + 'element_c': DataTypeTag[operation.C.element], + 'layout_c': LayoutTag[operation.C.layout], + 'element_accumulator': DataTypeTag[operation.accumulator_type()], + 'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class], + 'arch': "cutlass::arch::Sm%d" % operation.arch, + 'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]), + 'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]), + 'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]), + 'warp_shape_m': 
str(warp_shape[0]), + 'warp_shape_n': str(warp_shape[1]), + 'warp_shape_k': str(warp_shape[2]), + 'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]), + 'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]), + 'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]), + 'epilogue_vector_length': str(epilogue_vector_length), + 'element_epilogue': str(DataTypeTag[operation.element_epilogue]), + 'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor], + 'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor], + 'stages': str(operation.tile_description.stages), + 'align_a': str(operation.A.alignment), + 'align_b': str(operation.B.alignment), + 'transform_a': ComplexTransformTag[operation.A.complex_transform], + 'transform_b': ComplexTransformTag[operation.B.complex_transform], + 'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation], + 'residual': residual + } + + template = self.gemm_complex_template if operation.is_complex() else self.gemm_template + + return SubstituteTemplate(template, values) + +################################################################################################### + +class EmitSparseGemmInstance: + ''' Responsible for emitting a CUTLASS template definition''' + + def __init__(self, operation_suffix = ''): + self.operation_suffix = operation_suffix + self.includes = [] + self.gemm_template = """ + // Gemm operator ${operation_name} + using Operation_${operation_name} = cutlass::gemm::device::SparseGemm< + ${element_a}, ${layout_a}, + ${element_b}, ${layout_b}, + ${element_c}, ${layout_c}, + ${element_accumulator}, + ${opcode_class}, + ${arch}, + cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, + cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>, + cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, + ${epilogue_functor}< + ${element_c}, + ${epilogue_vector_length}, + ${element_accumulator}, + ${element_epilogue} + >, + ${swizzling_functor}, + ${stages}, + ${align_a}, + ${align_b}, + false, + ${math_operation} + ${residual} + >; +""" + + # + def instance_template(self): + return """ +${compile_guard_start} + manifest.append(new ${gemm_kind}("${operation_name}")); +${compile_guard_end} +""" + + # + def emit(self, operation): + + warp_shape = [operation.tile_description.threadblock_shape[idx] // operation.tile_description.warp_count[idx] for idx in range(3)] + + epilogue_vector_length = int(min(operation.C.alignment * DataTypeSize[operation.C.element], 128) / DataTypeSize[operation.C.element]) + + residual = '' + + values = { + 'operation_name': operation.procedural_name(), + 'element_a': DataTypeTag[operation.A.element], + 'layout_a': LayoutTag[operation.A.layout], + 'element_b': DataTypeTag[operation.B.element], + 'layout_b': LayoutTag[operation.B.layout], + 'element_c': DataTypeTag[operation.C.element], + 'layout_c': LayoutTag[operation.C.layout], + 'element_accumulator': DataTypeTag[operation.accumulator_type()], + 'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class], + 'arch': "cutlass::arch::Sm%d" % operation.arch, + 'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]), + 'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]), + 'threadblock_shape_k': 
str(operation.tile_description.threadblock_shape[2]), + 'warp_shape_m': str(warp_shape[0]), + 'warp_shape_n': str(warp_shape[1]), + 'warp_shape_k': str(warp_shape[2]), + 'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]), + 'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]), + 'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]), + 'epilogue_vector_length': str(epilogue_vector_length), + 'element_epilogue': str(DataTypeTag[operation.element_epilogue]), + 'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor], + 'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor], + 'stages': str(operation.tile_description.stages), + 'align_a': str(operation.A.alignment), + 'align_b': str(operation.B.alignment), + 'transform_a': ComplexTransformTag[operation.A.complex_transform], + 'transform_b': ComplexTransformTag[operation.B.complex_transform], + 'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation], + 'residual': residual + } + + template = self.gemm_template + + return SubstituteTemplate(template, values) + +################################################################################################### + + +# +class EmitGemmUniversalInstance: + ''' Responsible for emitting a CUTLASS template definition''' + + def __init__(self, operation_suffix = ''): + self.operation_suffix = operation_suffix + self.includes = [ + "cutlass/cutlass.h", + "cutlass/numeric_types.h", + "cutlass/arch/arch.h", + "cutlass/arch/mma.h", + "cutlass/layout/matrix.h", + "cutlass/gemm/device/gemm.h", + "cutlass/gemm/device/gemm_universal_adapter.h", + "cutlass/gemm/kernel/default_gemm_universal.h", + ] + self.builtin_epilogue_functor_template = """ + ${epilogue_functor}< + ${element_c}, + ${epilogue_vector_length}, + ${element_accumulator}, + ${element_epilogue} + > +""" + self.gemm_template = """ +// Gemm operator ${operation_name} +using ${operation_name}_base = + typename cutlass::gemm::kernel::DefaultGemmUniversal< + ${element_b}, ${layout_b}, ${transform_b}, ${align_b}, // transposed B operand + ${element_a}, ${layout_a}, ${transform_a}, ${align_a}, // transposed A operand + ${element_c}, ${layout_c}, + ${element_accumulator}, + ${opcode_class}, + ${arch}, + cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, + cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>, + cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, + ${epilogue_functor}, + ${swizzling_functor}, + ${stages}, + ${math_operation} +>::GemmKernel; + +// Define named type +struct ${operation_name}${operation_suffix} : + public ${operation_name}_base { }; +""" + self.gemm_template_interleaved = """ +// Gemm operator ${operation_name} +using ${operation_name}_base = + typename cutlass::gemm::kernel::DefaultGemmUniversal< + ${element_a}, ${layout_a}, ${transform_a}, ${align_a}, + ${element_b}, ${layout_b}, ${transform_b}, ${align_b}, + ${element_c}, ${layout_c}, + ${element_accumulator}, + ${opcode_class}, + ${arch}, + cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, + cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>, + cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, + ${epilogue_functor}, + ${swizzling_functor}, + ${stages}, + 
${math_operation} +>::GemmKernel; + +// Define named type +struct ${operation_name}${operation_suffix} : + public ${operation_name}_base { }; +""" + + # + def instance_template(self): + return """ +${compile_guard_start} + manifest.append(new ${gemm_kind}< + cutlass::gemm::device::GemmUniversalAdapter<${operation_name}> + >("${operation_name}")); +${compile_guard_end} +""" + + # + def emit(self, operation): + + threadblock_shape = operation.tile_description.threadblock_shape + warp_count = operation.tile_description.warp_count + + warp_shape = [threadblock_shape[idx] // warp_count[idx] for idx in range(3)] + + transpose_layouts = { + LayoutType.ColumnMajor: LayoutType.RowMajor, + LayoutType.RowMajor: LayoutType.ColumnMajor + } + + if operation.A.layout in transpose_layouts.keys() and \ + operation.B.layout in transpose_layouts.keys() and \ + operation.C.layout in transpose_layouts.keys(): + + instance_layout_A = transpose_layouts[operation.A.layout] + instance_layout_B = transpose_layouts[operation.B.layout] + instance_layout_C = transpose_layouts[operation.C.layout] + + gemm_template = self.gemm_template + else: + instance_layout_A, instance_layout_B, instance_layout_C = \ + (operation.A.layout, operation.B.layout, operation.C.layout) + + gemm_template = self.gemm_template_interleaved + # + + # Support built-in epilogue functors or user-defined functions + if isinstance(operation.epilogue_functor, enum.Enum): + + epilogue_vector_length = \ + min(operation.C.alignment * DataTypeSize[operation.C.element], 128) // DataTypeSize[operation.C.element] + + values = { + 'epilogue_vector_length': str(epilogue_vector_length), + 'element_epilogue': str(DataTypeTag[operation.element_epilogue]), + 'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor], + } + epilogue_functor = SubstituteTemplate(self.builtin_epilogue_functor_template, values) + else: + epilogue_functor = self.epilogue_functor.emit_declaration() + # + + values = { + 'operation_name': operation.procedural_name(), + 'operation_suffix': self.operation_suffix, + 'element_a': DataTypeTag[operation.A.element], + 'layout_a': LayoutTag[instance_layout_A], + 'element_b': DataTypeTag[operation.B.element], + 'layout_b': LayoutTag[instance_layout_B], + 'element_c': DataTypeTag[operation.C.element], + 'layout_c': LayoutTag[instance_layout_C], + 'element_accumulator': DataTypeTag[operation.accumulator_type()], + 'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class], + 'arch': "cutlass::arch::Sm%d" % operation.arch, + 'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]), + 'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]), + 'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]), + 'warp_shape_m': str(warp_shape[0]), + 'warp_shape_n': str(warp_shape[1]), + 'warp_shape_k': str(warp_shape[2]), + 'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]), + 'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]), + 'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]), + 'epilogue_functor': epilogue_functor, + 'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor], + 'stages': str(operation.tile_description.stages), + 'align_a': str(operation.A.alignment), + 'align_b': str(operation.B.alignment), + 'transform_a': ComplexTransformTag[operation.A.complex_transform], + 'transform_b': 
ComplexTransformTag[operation.B.complex_transform], + 'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation] + } + + return SubstituteTemplate(gemm_template, values) + + +################################################################################################### + +# +class EmitGemmUniversal3xInstance: + ''' Responsible for emitting a CUTLASS 3.x template definition''' + + def __init__(self, operation_suffix = ''): + self.operation_suffix = operation_suffix + self.includes = [ + "cutlass/cutlass.h", + "cutlass/gemm/gemm.h", + "cutlass/numeric_types.h", + "cutlass/gemm/kernel/gemm_universal.hpp", + "cutlass/gemm/collective/collective_builder.hpp", + "cutlass/epilogue/collective/collective_builder.hpp", + ] + self.builtin_epilogue_functor_template = """ + ${epilogue_functor}< + ${element_c}, + ${epilogue_vector_length}, + ${element_accumulator}, + ${element_epilogue} + > +""" + self.gemm_template = """ + +using ${operation_name}_epilogue = + typename cutlass::epilogue::collective::CollectiveBuilder< + ${arch}, ${opcode_class}, + cute::Shape, + cute::Shape, + cutlass::epilogue::collective::EpilogueTileAuto, + ${element_accumulator}, ${element_epilogue}, + ${element_c}, ${layout_c}, ${align_c}, + ${element_d}, ${layout_d}, ${align_d}, + ${epilogue_schedule} + >::CollectiveOp; + +using ${operation_name}_mainloop = + typename cutlass::gemm::collective::CollectiveBuilder< + ${arch}, ${opcode_class}, + ${element_a}, ${layout_a}, ${align_a}, + ${element_b}, ${layout_b}, ${align_b}, + ${element_accumulator}, + cute::Shape, + cute::Shape, + ${stages}, + ${kernel_schedule} + >::CollectiveOp; + +// Gemm operator ${operation_name} +using ${operation_name}_base = cutlass::gemm::kernel::GemmUniversal< + cute::Shape, + ${operation_name}_mainloop, + ${operation_name}_epilogue, + ${tile_scheduler}>; + +// Define named type +struct ${operation_name} : + public ${operation_name}_base { }; + +""" + # + def instance_template(self): + return """ +${compile_guard_start} + using GemmKernel = cutlass::gemm::device::GemmUniversalAdapter<${operation_name}>; + manifest.append( + new ${gemm_kind}("${operation_name}")); +${compile_guard_end} +""" + + # + def emit(self, operation): + + tile_shape = operation.tile_description.tile_shape + warp_count = operation.tile_description.warp_count + # stage count set to zero indicates builder automatic stage selection + if operation.tile_description.stages > 0: + stage_count_string = f"cutlass::gemm::collective::StageCount<{str(operation.tile_description.stages)}>" + else: + stage_count_string = f"cutlass::gemm::collective::StageCountAutoCarveout" + warp_shape = [tile_shape[idx] // warp_count[idx] for idx in range(3)] + + instance_layout_A, instance_layout_B, instance_layout_C , instance_layout_D = \ + (operation.A.layout, operation.B.layout, operation.C.layout, operation.D.layout) + + # 3.0 profiler integration only supports trivial epilogues for now + epilogue_vector_length = 1 + + # Support built-in epilogue functors or user-defined functions + if isinstance(operation.epilogue_functor, enum.Enum): + values = { + 'epilogue_vector_length': str(epilogue_vector_length), + 'element_epilogue': str(DataTypeTag[operation.element_epilogue]), + 'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor], + } + epilogue_functor = SubstituteTemplate(self.builtin_epilogue_functor_template, values) + else: + epilogue_functor = self.epilogue_functor.emit_declaration() + # + + values = { + 'operation_name': operation.procedural_name(), + 
'operation_suffix': self.operation_suffix, + 'element_a': DataTypeTag[operation.A.element], + 'layout_a': LayoutTag[instance_layout_A], + 'element_b': DataTypeTag[operation.B.element], + 'layout_b': LayoutTag[instance_layout_B], + 'element_c': DataTypeTag[operation.C.element], + 'layout_c': LayoutTag[instance_layout_C], + 'element_d': DataTypeTag[operation.D.element], + 'layout_d': LayoutTag[instance_layout_D], + 'element_accumulator': DataTypeTag[operation.accumulator_type()], + 'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class], + 'arch': "cutlass::arch::Sm%d" % operation.arch, + 'tile_shape_m': str(operation.tile_description.tile_shape[0]), + 'tile_shape_n': str(operation.tile_description.tile_shape[1]), + 'tile_shape_k': str(operation.tile_description.tile_shape[2]), + 'cluster_m': str(operation.tile_description.cluster_shape[0]), + 'cluster_n': str(operation.tile_description.cluster_shape[1]), + 'cluster_k': str(operation.tile_description.cluster_shape[2]), + 'warp_shape_m': str(warp_shape[0]), + 'warp_shape_n': str(warp_shape[1]), + 'warp_shape_k': str(warp_shape[2]), + 'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]), + 'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]), + 'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]), + 'kernel_schedule' : str(KernelScheduleTag[operation.kernel_schedule]), + 'epilogue_schedule' : str(EpilogueScheduleTag[operation.epilogue_schedule]), + 'epilogue_functor': epilogue_functor, + 'stages': stage_count_string, + 'align_a': str(operation.A.alignment), + 'align_b': str(operation.B.alignment), + 'align_c': str(operation.C.alignment), + 'align_d': str(operation.C.alignment), + 'transform_a': ComplexTransformTag[operation.A.complex_transform], + 'transform_b': ComplexTransformTag[operation.B.complex_transform], + 'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation], + 'epilogue_vector_length': str(epilogue_vector_length), + 'element_epilogue': str(DataTypeTag[operation.element_epilogue]), + 'tile_scheduler': str(TileSchedulerTag[operation.tile_scheduler]) + } + + return SubstituteTemplate(self.gemm_template, values) + +################################################################################################### + +# +class EmitGemmPlanarComplexInstance: + ''' Responsible for emitting a CUTLASS template definition''' + + def __init__(self, operation_suffix = ''): + self.operation_suffix = operation_suffix + self.includes = [] + self.template = """ + // Gemm operator ${operation_name} + using Operation_${operation_name} = typename cutlass::gemm::kernel::DefaultGemmPlanarComplexUniversal< + ${element_a}, ${layout_a}, ${transform_a}, ${alignment_a}, + ${element_b}, ${layout_b}, ${transform_b}, ${alignment_b}, + ${element_c}, cutlass::layout::RowMajor, + ${element_accumulator}, + ${opcode_class}, + ${arch}, + cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, + cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>, + cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, + cutlass::epilogue::thread::LinearCombinationPlanarComplex< + ${element_c}, + ${alignment_c}, + ${element_accumulator}, + ${element_epilogue} + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + ${stages}, + ${math_operator} + >::GemmKernel; + + struct 
${operation_name} : + public Operation_${operation_name} { }; +""" + + # + def instance_template(self): + return """ +${compile_guard_start} + manifest.append(new ${gemm_kind}< + cutlass::gemm::device::GemmUniversalAdapter<${operation_name}> + >("${operation_name}")); +${compile_guard_end} +""" + + # + def emit(self, operation): + + warp_shape = [operation.tile_description.threadblock_shape[idx] // operation.tile_description.warp_count[idx] for idx in range(3)] + + # exchange and transpose A and B types, layouts, and complex transforms since the C layout is row-major + transposed_layout_A = TransposedLayout[operation.A.layout] + transposed_layout_B = TransposedLayout[operation.B.layout] + + values = { + 'operation_name': operation.procedural_name(), + 'element_a': DataTypeTag[operation.B.element], + 'layout_a': LayoutTag[transposed_layout_B], + 'transform_a': ComplexTransformTag[operation.B.complex_transform], + 'alignment_a': str(operation.B.alignment), + 'element_b': DataTypeTag[operation.A.element], + 'layout_b': LayoutTag[transposed_layout_A], + 'transform_b': ComplexTransformTag[operation.A.complex_transform], + 'alignment_b': str(operation.A.alignment), + 'element_c': DataTypeTag[operation.C.element], + 'layout_c': LayoutTag[operation.C.layout], + 'element_accumulator': DataTypeTag[operation.tile_description.math_instruction.element_accumulator], + 'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class], + 'arch': "cutlass::arch::Sm%d" % operation.arch, + 'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]), + 'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]), + 'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]), + 'warp_shape_m': str(warp_shape[0]), + 'warp_shape_n': str(warp_shape[1]), + 'warp_shape_k': str(warp_shape[2]), + 'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]), + 'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]), + 'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]), + 'alignment_c': str(operation.C.alignment), + 'element_epilogue': str(DataTypeTag[operation.element_epilogue]), + 'stages': str(operation.tile_description.stages), + 'math_operator': 'cutlass::arch::OpMultiplyAdd' + } + + return SubstituteTemplate(self.template, values) + +################################################################################################### + +# +class EmitGemmPlanarComplexArrayInstance: + ''' Responsible for emitting a CUTLASS template definition''' + + def __init__(self, operation_suffix = ''): + self.operation_suffix = operation_suffix + self.includes = [] + self.template = """ + // Gemm operator ${operation_name} + using Operation_${operation_name} = typename cutlass::gemm::kernel::DefaultGemmPlanarComplexUniversal< + ${element_a}, ${layout_a}, ${transform_a}, ${alignment_a}, + ${element_b}, ${layout_b}, ${transform_b}, ${alignment_b}, + ${element_c}, cutlass::layout::RowMajor, + ${element_accumulator}, + ${opcode_class}, + ${arch}, + cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, + cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>, + cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, + cutlass::epilogue::thread::LinearCombinationPlanarComplex< + ${element_c}, + ${alignment_c}, + ${element_accumulator}, + 
${element_epilogue} + >, + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, + ${stages}, + ${math_operator} + >::GemmArrayKernel; + + struct ${operation_name} : public Operation_${operation_name} { }; +""" + + # + def instance_template(self): + return """ +${compile_guard_start} + manifest.append(new ${gemm_kind}< + cutlass::gemm::device::GemmUniversalAdapter<${operation_name}> + >("${operation_name}")); +${compile_guard_end} +""" + + # + def emit(self, operation): + + warp_shape = [operation.tile_description.threadblock_shape[idx] // operation.tile_description.warp_count[idx] for idx in range(3)] + + # exchange and transpose A and B types, layouts, and complex transforms since the C layout is row-major + transposed_layout_A = TransposedLayout[operation.A.layout] + transposed_layout_B = TransposedLayout[operation.B.layout] + + values = { + 'operation_name': operation.procedural_name(), + 'element_a': DataTypeTag[operation.B.element], + 'layout_a': LayoutTag[transposed_layout_B], + 'transform_a': ComplexTransformTag[operation.B.complex_transform], + 'alignment_a': str(operation.B.alignment), + 'element_b': DataTypeTag[operation.A.element], + 'layout_b': LayoutTag[transposed_layout_A], + 'transform_b': ComplexTransformTag[operation.A.complex_transform], + 'alignment_b': str(operation.A.alignment), + 'element_c': DataTypeTag[operation.C.element], + 'layout_c': LayoutTag[operation.C.layout], + 'element_accumulator': DataTypeTag[operation.tile_description.math_instruction.element_accumulator], + 'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class], + 'arch': "cutlass::arch::Sm%d" % operation.arch, + 'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]), + 'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]), + 'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]), + 'warp_shape_m': str(warp_shape[0]), + 'warp_shape_n': str(warp_shape[1]), + 'warp_shape_k': str(warp_shape[2]), + 'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]), + 'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]), + 'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]), + 'alignment_c': str(operation.C.alignment), + 'element_epilogue': str(DataTypeTag[operation.element_epilogue]), + 'stages': str(operation.tile_description.stages), + 'math_operator': 'cutlass::arch::OpMultiplyAdd' + } + + return SubstituteTemplate(self.template, values) + +################################################################################################### + +# +class EmitGemmGroupedInstance: + ''' Responsible for emitting a CUTLASS template definition''' + + def __init__(self, operation_suffix = ''): + self.operation_suffix = operation_suffix + self.includes = [ + "cutlass/cutlass.h", + "cutlass/numeric_types.h", + "cutlass/arch/arch.h", + "cutlass/arch/mma.h", + "cutlass/layout/matrix.h", + "cutlass/gemm/device/gemm.h", + "cutlass/gemm/kernel/gemm_grouped.h", + "cutlass/gemm/kernel/default_gemm_grouped.h", + "cutlass/gemm/device/gemm_grouped.h" + ] + self.builtin_epilogue_functor_template = """ + ${epilogue_functor}< + ${element_c}, + ${epilogue_vector_length}, + ${element_accumulator}, + ${element_epilogue} + > +""" + self.gemm_template = """ +// Gemm operator ${operation_name} +using ${operation_name}_base = + typename cutlass::gemm::kernel::DefaultGemmGrouped< + ${element_a}, ${layout_a}, ${transform_a}, 
${align_a}, + ${element_b}, ${layout_b}, ${transform_b}, ${align_b}, + ${element_c}, ${layout_c}, + ${element_accumulator}, + ${opcode_class}, + ${arch}, + cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, + cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>, + cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, + ${epilogue_functor}, + ${swizzling_functor}, + ${stages}, + ${scheduler_mode}, + ${math_operation} +>::GemmKernel; + +// Define named type +struct ${operation_name}${operation_suffix} : + public ${operation_name}_base { }; +""" + + # + def instance_template(self): + return """ +${compile_guard_start} + manifest.append(new ${gemm_kind}< + cutlass::gemm::device::GemmGrouped<${operation_name}> + >("${operation_name}")); +${compile_guard_end} +""" + + # + def emit(self, operation): + + threadblock_shape = operation.tile_description.threadblock_shape + warp_count = operation.tile_description.warp_count + + warp_shape = [threadblock_shape[idx] // warp_count[idx] for idx in range(3)] + + transpose_layouts = { + LayoutType.ColumnMajor: LayoutType.RowMajor, + LayoutType.RowMajor: LayoutType.ColumnMajor + } + + instance_layout_A, instance_layout_B, instance_layout_C = \ + (operation.A.layout, operation.B.layout, operation.C.layout) + # + + # Support built-in epilogue functors or user-defined functions + if isinstance(operation.epilogue_functor, enum.Enum): + + epilogue_vector_length = \ + min(operation.C.alignment * DataTypeSize[operation.C.element], 128) // DataTypeSize[operation.C.element] + + values = { + 'epilogue_vector_length': str(epilogue_vector_length), + 'element_epilogue': str(DataTypeTag[operation.element_epilogue]), + 'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor], + } + epilogue_functor = SubstituteTemplate(self.builtin_epilogue_functor_template, values) + else: + epilogue_functor = self.epilogue_functor.emit_declaration() + # + + values = { + 'operation_name': operation.procedural_name(), + 'operation_suffix': self.operation_suffix, + 'element_a': DataTypeTag[operation.A.element], + 'layout_a': LayoutTag[instance_layout_A], + 'element_b': DataTypeTag[operation.B.element], + 'layout_b': LayoutTag[instance_layout_B], + 'element_c': DataTypeTag[operation.C.element], + 'layout_c': LayoutTag[instance_layout_C], + 'element_accumulator': DataTypeTag[operation.accumulator_type()], + 'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class], + 'arch': "cutlass::arch::Sm%d" % operation.arch, + 'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]), + 'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]), + 'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]), + 'warp_shape_m': str(warp_shape[0]), + 'warp_shape_n': str(warp_shape[1]), + 'warp_shape_k': str(warp_shape[2]), + 'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]), + 'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]), + 'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]), + 'epilogue_functor': epilogue_functor, + 'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor], + 'stages': str(operation.tile_description.stages), + 'align_a': str(operation.A.alignment), + 'align_b': str(operation.B.alignment), + 'transform_a': 
ComplexTransformTag[operation.A.complex_transform], + 'transform_b': ComplexTransformTag[operation.B.complex_transform], + 'scheduler_mode': GroupScheduleModeTag[operation.scheduler_mode], + 'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation] + } + + return SubstituteTemplate(self.gemm_template, values) + +################################################################################################### +# +# Emitters functions for all targets +# +################################################################################################### + +class EmitGemmConfigurationLibrary: + def __init__(self, operation_path, configuration_name): + self.configuration_name = configuration_name + self.configuration_path = os.path.join(operation_path, "%s.cu" % configuration_name).replace('\\', '/') + + self.instance_emitter = { + GemmKind.Gemm: EmitGemmInstance, + GemmKind.Sparse: EmitSparseGemmInstance, + GemmKind.Universal: EmitGemmUniversalInstance, + GemmKind.Universal3x: EmitGemmUniversal3xInstance, + GemmKind.PlanarComplex: EmitGemmPlanarComplexInstance, + GemmKind.PlanarComplexArray: EmitGemmPlanarComplexArrayInstance, + GemmKind.Grouped: EmitGemmGroupedInstance + } + + self.gemm_kind_wrappers = { + GemmKind.Gemm: 'GemmOperation', + GemmKind.Sparse: 'GemmSparseOperation', + GemmKind.Universal: 'GemmUniversalOperation', + GemmKind.Universal3x: 'GemmUniversal3xOperation', + GemmKind.PlanarComplex: 'GemmPlanarComplexOperation', + GemmKind.PlanarComplexArray: 'GemmPlanarComplexArrayOperation', + GemmKind.Grouped: 'GemmGroupedOperation' + } + + self.wmma_guard_start = "#if defined(CUTLASS_ARCH_WMMA_SM${sm_number}_ENABLED)" + + self.separator = """ +/////////////////////////////////////////////////////////////////////////////////////////////////// + +""" + + self.header_template = """ +/* + Generated by gemm_operation.py - Do not edit. 
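# Editor's aside (illustrative sketch, not part of the original file): a minimal,
# self-contained stand-in showing the __enter__ / emit / __exit__ pattern that
# EmitGemmConfigurationLibrary (whose methods continue below) follows. The class name,
# file name, and templates here are hypothetical, not the real emitter.
import os
import string
import tempfile

class _ToyConfigurationEmitter:
  def __init__(self, path, name):
    self.path = path
    self.name = name
    self.registrations = []
  def __enter__(self):
    # open the output .cu file and write a banner
    self.file = open(self.path, "w")
    self.file.write("// configuration %s\n" % self.name)
    return self
  def emit(self, operation_name):
    # collect one manifest registration per operation; ${...} placeholders are filled
    # in the same spirit as SubstituteTemplate fills the templates above
    template = string.Template('  manifest.append(new GemmOperation("${op}"));\n')
    self.registrations.append(template.substitute(op = operation_name))
  def __exit__(self, exc_type, exc_value, traceback):
    # write the initialize_<name>() body and close the file
    self.file.write("void initialize_%s(Manifest &manifest) {\n" % self.name)
    self.file.writelines(self.registrations)
    self.file.write("}\n")
    self.file.close()

_toy_path = os.path.join(tempfile.gettempdir(), "toy_configuration.cu")
with _ToyConfigurationEmitter(_toy_path, "toy_configuration") as toy_emitter:
  toy_emitter.emit("cutlass_toy_gemm_kernel")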
+*/ +""" + + self.initialize_function_template = """ + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +void initialize_${configuration_name}(Manifest &manifest) { + +""" + self.epilogue_template = """ + +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +""" + + def __enter__(self): + self.configuration_file = open(self.configuration_path, "w") + self.configuration_file.write(self.header_template) + self.configuration_file.write(self.separator) + + self.includes = collections.OrderedDict([ + ("cutlass/cutlass.h", None), + ("cutlass/library/library.h", None), + ("cutlass/library/manifest.h", None), + ("library_internal.h", None), + ("gemm_operation.h", None), + ("gemm_operation_3x.hpp", None), + ("cutlass/arch/wmma.h", None), + ("cutlass/numeric_types.h", None) + ]) + self.instance_definitions = [] + self.instance_wrappers = [] + + self.operations = [] + return self + + def emit(self, operation): + emitter = self.instance_emitter[operation.gemm_kind]() + + for incl in emitter.includes: + self.includes[incl] = None + + self.operations.append(operation) + + self.instance_definitions.append(emitter.emit(operation)) + + self.instance_wrappers.append(SubstituteTemplate(emitter.instance_template(), { + 'configuration_name': self.configuration_name, + 'operation_name': operation.procedural_name(), + 'gemm_kind': self.gemm_kind_wrappers[operation.gemm_kind], + 'compile_guard_start': SubstituteTemplate(self.wmma_guard_start, {'sm_number': str(operation.arch)}) \ + if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else "", + 'compile_guard_end': "#endif" \ + if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else "" + })) + + def __exit__(self, exception_type, exception_value, traceback): + + # Write includes + for incl, _ in self.includes.items(): + include_statement = "#include \"%s\"\n" % incl + self.configuration_file.write(include_statement) + + self.configuration_file.write(self.separator) + + # Write instance definitions in top-level namespace + for instance_definition in self.instance_definitions: + self.configuration_file.write(instance_definition) + + # Add wrapper objects within initialize() function + self.configuration_file.write(SubstituteTemplate(self.initialize_function_template, { + 'configuration_name': self.configuration_name + })) + + for instance_wrapper in self.instance_wrappers: + self.configuration_file.write(instance_wrapper) + + self.configuration_file.write(self.epilogue_template) + self.configuration_file.close() + +################################################################################################### +################################################################################################### + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/generator.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/generator.py new file mode 100644 index 0000000000000000000000000000000000000000..facd5d960c26370e56f6dbde411e451a1754f009 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/generator.py @@ -0,0 +1,5382 @@ +################################################################################################# +# +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +################################################################################################# + +""" +Utilities for enumerating CUTLASS library kernels +""" + +import enum +import os.path +import shutil +import argparse +import logging + +from cutlass_library.library import * +from cutlass_library.manifest import * +from itertools import product + +################################################################################################### + +# +def CudaToolkitVersionSatisfies(semantic_ver_string, major, minor, patch = 0): + + # by default, use the latest CUDA Toolkit version + cuda_version = [11, 0, 132] + + # Update cuda_version based on parsed string + if semantic_ver_string != '': + for i, x in enumerate([int(x) for x in semantic_ver_string.split('.')]): + if i < len(cuda_version): + cuda_version[i] = x + else: + cuda_version.append(x) + return cuda_version >= [major, minor, patch] + + +################################################################################################### +################################################################################################### + +# +def EpilogueAlignment(max_alignment, tile, epilogue_steps = 8): + ''' Helper to compute the maximum alignment of the epilogue ''' + + def product(X, identity = 1): + result = identity + for item in X: + result *= item + return result + + elements_per_thread = product(tile.threadblock_shape[:-1]) // product(tile.warp_count) // 32 // epilogue_steps + return min(max_alignment, elements_per_thread) + +def DefaultSwizzlingFunctor(): + return SwizzlingFunctor.Identity8; + # To use StreamK decomposition for basic GEMMs, set `swizzling_functor = SwizzlingFunctor.StreamK` + +# +def CreateGemmOperator(manifest, layouts, tile_descriptions, data_type, \ + alignment_constraints, complex_transforms = None, epilogue_functor = EpilogueFunctor.LinearCombination, \ + swizzling_functor = DefaultSwizzlingFunctor()): + + if complex_transforms is None: + complex_transforms = [(ComplexTransform.none, ComplexTransform.none),] + + element_a, element_b, element_c, element_epilogue = data_type + + operations = [] + + # by default, only generate the largest tile and largest alignment + if manifest.kernel_filter == '': + tile_descriptions = [tile_descriptions[0],] + alignment_constraints = [alignment_constraints[0],] + + for layout in layouts: + for tile_description in tile_descriptions: + for alignment in alignment_constraints: + for complex_transform in complex_transforms: + + alignment_c = min(8, alignment) + + A = TensorDescription(element_a, layout[0], alignment, complex_transform[0]) + B = TensorDescription(element_b, layout[1], alignment, complex_transform[1]) + C = TensorDescription(element_c, layout[2], alignment_c) + + new_operation = GemmOperation(GemmKind.Universal, tile_description.minimum_compute_capability, \ + tile_description, A, B, C, element_epilogue, epilogue_functor, swizzling_functor) + + manifest.append(new_operation) + operations.append(new_operation) + + return operations + + +# Generates 3.0 API based GemmUniversal API kernels. 
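# Editor's aside (illustrative sketch, not part of the original file): a worked example
# of the EpilogueAlignment() arithmetic above, using a hypothetical tile description.
from types import SimpleNamespace
_example_tile = SimpleNamespace(threadblock_shape = [256, 128, 64], warp_count = [4, 2, 1])
# elements_per_thread = (256 * 128) // (4 * 2 * 1) // 32 // 8 = 16,
# so the requested maximum alignment is capped at 16:
assert EpilogueAlignment(8, _example_tile) == 8
assert EpilogueAlignment(32, _example_tile) == 16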
Alignment constraints are folded in with layouts +def CreateGemmUniversal3xOperator( + manifest, layouts, tile_descriptions, data_types, + schedules = [[KernelScheduleType.ScheduleAuto, EpilogueScheduleType.ScheduleAuto]], + complex_transforms=None, + epilogue_functor=EpilogueFunctor.LinearCombination, + swizzling_functor=SwizzlingFunctor.Identity1, + tile_schedulers=[TileSchedulerType.Persistent]): + + if type(data_types) is dict: + data_types = [data_types] + + for s in schedules: + assert(len(s) == 2) + + if complex_transforms is None: + complex_transforms = [(ComplexTransform.none, ComplexTransform.none), ] + + operations = [] + + # by default, only generate the largest tile and largest alignment + if manifest.kernel_filter == '': + tile_descriptions = [tile_descriptions[0]] + + combinations = product(layouts, tile_descriptions, data_types, complex_transforms, schedules, tile_schedulers) + for layout, tile_description, data_type, complex_transform, schedules, tile_scheduler in combinations: + kernel_schedule, epilogue_schedule = schedules + A = TensorDescription( + data_type["a_type"], layout[0][0], layout[0][1], complex_transform[0]) + B = TensorDescription( + data_type["b_type"], layout[1][0], layout[1][1], complex_transform[1]) + + C = TensorDescription(data_type["c_type"], layout[2][0], layout[2][1]) + D = TensorDescription(data_type["d_type"], layout[2][0], layout[2][1]) + + element_compute = data_type.get("epi_type", data_type["acc_type"]) + operation = GemmOperation( + GemmKind.Universal3x, tile_description.minimum_compute_capability, + tile_description, A, B, C, element_compute, epilogue_functor, swizzling_functor, D, + kernel_schedule, epilogue_schedule, tile_scheduler) + + manifest.append(operation) + operations.append(operation) + + return operations + +# +def CreateSparseGemmOperator(manifest, layouts, tile_descriptions, data_type, \ + alignment_constraints, complex_transforms = None, epilogue_functor = EpilogueFunctor.LinearCombination, \ + swizzling_functor = SwizzlingFunctor.Identity8): + + if complex_transforms is None: + complex_transforms = [(ComplexTransform.none, ComplexTransform.none),] + + element_a, element_b, element_c, element_epilogue = data_type + + gemm_kinds = [GemmKind.Sparse] + + operations = [] + + # by default, only generate the largest tile and largest alignment + if manifest.kernel_filter == '': + tile_descriptions = [tile_descriptions[0],] + alignment_constraints = [alignment_constraints[0],] + + for layout in layouts: + for tile_description in tile_descriptions: + for alignment in alignment_constraints: + for complex_transform in complex_transforms: + + alignment_c = min(8, alignment) + + A = TensorDescription(element_a, layout[0], alignment, complex_transform[0]) + B = TensorDescription(element_b, layout[1], alignment, complex_transform[1]) + C = TensorDescription(element_c, layout[2], alignment_c) + + new_operation = GemmOperation(GemmKind.Sparse, tile_description.minimum_compute_capability, \ + tile_description, A, B, C, element_epilogue, epilogue_functor, swizzling_functor) + + manifest.append(new_operation) + operations.append(new_operation) + + return operations + +# +def CreateGemmPlanarComplexOperator(manifest, layouts, tile_descriptions, data_type, \ + alignment_constraints, complex_transforms): + + if complex_transforms is None: + complex_transforms = [(ComplexTransform.none, ComplexTransform.none),] + + element_a, element_b, element_c, element_epilogue = data_type + + gemm_kinds = [GemmKind.PlanarComplex, GemmKind.PlanarComplexArray] + + 
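# Editor's aside (illustrative sketch, not part of the original file): a hypothetical
# example of the calling pattern for the Create*Operator helpers above. `manifest` and
# `tile_descriptions` are assumed to already exist (a Manifest and a list of
# TileDescription objects); the layouts, alignments, and data types are illustrative.
def _example_generate_fp16_gemm_universal3x(manifest, cuda_version, tile_descriptions):
  if not CudaToolkitVersionSatisfies(cuda_version, 12, 0):
    return
  # one (layout, alignment) pair per operand A, B, C; alignment is folded in with the layout
  layouts = [
    [[LayoutType.RowMajor, 8], [LayoutType.ColumnMajor, 8], [LayoutType.ColumnMajor, 1]],
  ]
  data_type = {
    "a_type"   : DataType.f16,
    "b_type"   : DataType.f16,
    "c_type"   : DataType.f32,
    "d_type"   : DataType.f32,
    "acc_type" : DataType.f32,
    "epi_type" : DataType.f32,   # optional; CreateGemmUniversal3xOperator falls back to acc_type
  }
  schedules = [[KernelScheduleType.ScheduleAuto, EpilogueScheduleType.ScheduleAuto]]
  CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type, schedules)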
# by default, only generate the largest tile and largest alignment + if manifest.kernel_filter == '': + tile_descriptions = [tile_descriptions[0],] + alignment_constraints = [alignment_constraints[0],] + + for gemm_kind in gemm_kinds: + for layout in layouts: + for tile_description in tile_descriptions: + for alignment in alignment_constraints: + for complex_transform in complex_transforms: + + alignment_c = min(8, alignment) + + A = TensorDescription(element_a, layout[0], alignment, complex_transform[0]) + B = TensorDescription(element_b, layout[1], alignment, complex_transform[1]) + C = TensorDescription(element_c, layout[2], alignment_c) + + manifest.append(GemmOperation(gemm_kind, \ + tile_description.minimum_compute_capability, \ + tile_description, A, B, C, element_epilogue)) + return + +# +def CreateGemmGroupedOperator(manifest, layouts, tile_descriptions, data_type, \ + alignment_constraints, complex_transforms = None, epilogue_functor = EpilogueFunctor.LinearCombination, \ + swizzling_functor = SwizzlingFunctor.Identity8): + + if complex_transforms is None: + complex_transforms = [(ComplexTransform.none, ComplexTransform.none),] + + element_a, element_b, element_c, element_epilogue = data_type + + operations = [] + + # by default, only generate the largest tile and largest alignment + if manifest.kernel_filter == '': + tile_descriptions = [tile_descriptions[0],] + alignment_constraints = [alignment_constraints[0],] + + for layout in layouts: + for tile_description in tile_descriptions: + for alignment in alignment_constraints: + for complex_transform in complex_transforms: + + alignment_c = min(8, alignment) + + A = TensorDescription(element_a, layout[0], alignment, complex_transform[0]) + B = TensorDescription(element_b, layout[1], alignment, complex_transform[1]) + C = TensorDescription(element_c, layout[2], alignment_c) + + new_operation = GroupedGemmOperation(GemmKind.Grouped, tile_description.minimum_compute_capability, \ + tile_description, A, B, C, element_epilogue, epilogue_functor, swizzling_functor) + + manifest.append(new_operation) + operations.append(new_operation) + + return operations + +# +def CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, data_type, \ + alignment_constraints, blas_mode, epilogue_functor = EpilogueFunctor.LinearCombination, \ + swizzling_functor = SwizzlingFunctor.Identity8): + + element_a, element_c, element_epilogue = data_type + + operations = [] + + # by default, only generate the largest tile and largest alignment + if manifest.kernel_filter == '': + tile_descriptions = [tile_descriptions[0],] + alignment_constraints = [alignment_constraints[0],] + + for layout in layouts: + for fill_mode in fill_modes: + for tile_description in tile_descriptions: + for alignment in alignment_constraints: + + # SERK supported layouts (RowMajor, ColumnMajor) with no conjugation + complex_transform = ComplexTransform.none + + # HERK supported layouts (RowMajor + conj, ColumnMajor) + if blas_mode == BlasMode.hermitian and layout[0] == LayoutType.RowMajor: + complex_transform = ComplexTransform.conj + + alignment_c = 1 # Alignment only applies to A in SYRK + + A = TensorDescription(element_a, layout[0], alignment, complex_transform) + C = SymmetricTensorDescription(element_c, layout[1], fill_mode, alignment_c) + + # Rank-K update + new_operation = RankKOperation(RankKKind.Universal, tile_description.minimum_compute_capability, \ + tile_description, A, C, element_epilogue, epilogue_functor, swizzling_functor, blas_mode) + + 
manifest.append(new_operation) + operations.append(new_operation) + + # Rank-2K update + new_operation = Rank2KOperation(RankKKind.Universal, tile_description.minimum_compute_capability, \ + tile_description, A, C, element_epilogue, epilogue_functor, swizzling_functor, blas_mode) + + manifest.append(new_operation) + operations.append(new_operation) + + return operations + +# +def CreateTrmmOperator(manifest, layouts, side_modes, fill_modes, diag_types, tile_descriptions, data_type, \ + alignment_constraints, complex_transforms = None, epilogue_functor = EpilogueFunctor.LinearCombination, \ + swizzling_functor = SwizzlingFunctor.Identity8): + + if complex_transforms is None: + complex_transforms = [(ComplexTransform.none),] + + element_a, element_b, element_c, element_epilogue = data_type + + operations = [] + + # by default, only generate the largest tile and largest alignment + if manifest.kernel_filter == '': + tile_descriptions = [tile_descriptions[0],] + alignment_constraints = [alignment_constraints[0],] + + for layout in layouts: + for side_mode in side_modes: + for fill_mode in fill_modes: + for diag_type in diag_types: + for tile_description in tile_descriptions: + for alignment in alignment_constraints: + for complex_transform in complex_transforms: + + alignment_c = min(8, alignment) + + A = TriangularTensorDescription(element_a, layout[0], side_mode, fill_mode, diag_type, + alignment, complex_transform) + B = TensorDescription(element_b, layout[1], alignment) + C = TensorDescription(element_c, layout[2], alignment_c) + + new_operation = TrmmOperation(TrmmKind.Universal, tile_description.minimum_compute_capability, \ + tile_description, A, B, C, element_epilogue, epilogue_functor, swizzling_functor) + + manifest.append(new_operation) + operations.append(new_operation) + + return operations + +# +def CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, data_type, \ + alignment_constraints, blas_mode, epilogue_functor = EpilogueFunctor.LinearCombination, \ + swizzling_functor = SwizzlingFunctor.Identity8): + + element_a, element_b, element_c, element_epilogue = data_type + + operations = [] + + # by default, only generate the largest tile and largest alignment + if manifest.kernel_filter == '': + tile_descriptions = [tile_descriptions[0],] + alignment_constraints = [alignment_constraints[0],] + + for layout in layouts: + for side_mode in side_modes: + for fill_mode in fill_modes: + for tile_description in tile_descriptions: + for alignment in alignment_constraints: + + # SYMM supported layouts (RowMajor, ColumnMajor) with no conjugation + complex_transform = ComplexTransform.none + + alignment_a = 1 # No vectorized access for the triangular matrix + alignment_c = min(8, alignment) + + A = SymmetricTensorDescription(element_a, layout[0], fill_mode, alignment_a, complex_transform, side_mode) + # tensor A and B have same data type and layout + B = TensorDescription(element_b, layout[0], alignment) + C = TensorDescription(element_c, layout[1], alignment_c) + + # SYMM/HEMM update + new_operation = SymmOperation(SymmKind.Universal, tile_description.minimum_compute_capability, \ + tile_description, A, B, C, element_epilogue, epilogue_functor, swizzling_functor, blas_mode) + + manifest.append(new_operation) + operations.append(new_operation) + + # SYMM/HEMM update + new_operation = SymmOperation(SymmKind.Universal, tile_description.minimum_compute_capability, \ + tile_description, A, B, C, element_epilogue, epilogue_functor, swizzling_functor, blas_mode) + + 
manifest.append(new_operation) + operations.append(new_operation) + + return operations + +########################################################################################################### +# ConvolutionOperator support variations +# ____________________________________________________________________ +# ConvolutionalOperator | Analytic | Optimized +# ____________________________________________________________________ +# | Fprop | (strided) | (strided) +# | Dgrad | (strided, unity*) | (strided, unity) +# | Wgrad | (strided) | (strided) +# ____________________________________________________________________ +# +# Note : Operator marked (*) are supported but not generated to keep the instantiated kernel count low +########################################################################################################### +# Convolution for 2D operations +def CreateConv2dOperator(manifest, layout, tile_descriptions, data_type, alignment_constraints, \ + conv_kinds = [ConvKind.Fprop, ConvKind.Dgrad, ConvKind.Wgrad], \ + epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity4): + + element_a, element_b, element_c, element_epilogue = data_type + + # one exceptional case + + # iterator algorithm (analytic and optimized) + iterator_algorithms = [IteratorAlgorithm.Analytic, IteratorAlgorithm.Optimized] + + # by default, only generate the largest tile size, largest alignment, and optimized iterator + if manifest.kernel_filter == '': + tile_descriptions = [tile_descriptions[0],] + alignment_constraints = [alignment_constraints[0],] + iterator_algorithms = [IteratorAlgorithm.Optimized] + + operations = [] + + for tile in tile_descriptions: + for alignment in alignment_constraints: + + alignment_c = min(8, alignment) + + A = TensorDescription(element_a, layout[0], alignment) + B = TensorDescription(element_b, layout[1], alignment) + C = TensorDescription(element_c, layout[2], alignment_c) + + swizzling_functor_ = swizzling_functor + + # + # Conv2d Fprop + # + if ConvKind.Fprop in conv_kinds: + + # Strided support for Analytic and Optimized Fprop + for iterator_algorithm in iterator_algorithms: + new_operations = [ + # None grouped kernel + Conv2dOperation(ConvKind.Fprop, iterator_algorithm, tile.minimum_compute_capability, tile,\ + A, B, C, element_epilogue, StrideSupport.Strided, epilogue_functor, swizzling_functor_), + ] + + # Instance group conv kernel + if tile.math_instruction.opcode_class == OpcodeClass.TensorOp and A.layout == LayoutType.TensorNHWC and \ + tile.minimum_compute_capability >= 80: + # SingleGroup kernel + new_operations.append(Conv2dOperation(ConvKind.Fprop, iterator_algorithm, tile.minimum_compute_capability, tile,\ + A, B, C, element_epilogue, StrideSupport.Strided, epilogue_functor, swizzling_functor_, group_mode=GroupMode.SingleGroup)) + + # Analytic iterator supports MultipleGroup mode + if iterator_algorithm == IteratorAlgorithm.Analytic: + new_operations.append(Conv2dOperation(ConvKind.Fprop, iterator_algorithm, tile.minimum_compute_capability, tile,\ + A, B, C, element_epilogue, StrideSupport.Strided, epilogue_functor, swizzling_functor_, group_mode=GroupMode.MultipleGroup)) + + for new_operation in new_operations: + manifest.append(new_operation) + operations.append(new_operation) + + # + # Conv2d Dgrad + # + if ConvKind.Dgrad in conv_kinds: + + # Unity stride for Analytic and Optimized Dgrad + for iterator_algorithm in iterator_algorithms: + new_operation = Conv2dOperation(ConvKind.Dgrad, iterator_algorithm, 
tile.minimum_compute_capability, tile,\ + A, B, C, element_epilogue, StrideSupport.Unity, epilogue_functor, swizzling_functor_) + + manifest.append(new_operation) + operations.append(new_operation) + + # Strided support for Analytic Dgrad + # strided dgrad uses a special threadblock swizzle + # note that SwizzlingFunctor.StridedDgradHorizontal might be + # better for problem sizes with large activation channel count + swizzling_functor_strided_dgrad_ = SwizzlingFunctor.StridedDgradIdentity1 + + if IteratorAlgorithm.Analytic in iterator_algorithms: + new_operation = Conv2dOperation(ConvKind.Dgrad, IteratorAlgorithm.Analytic, tile.minimum_compute_capability, tile,\ + A, B, C, element_epilogue, StrideSupport.Strided, epilogue_functor, swizzling_functor_strided_dgrad_) + + manifest.append(new_operation) + operations.append(new_operation) + + # Strided support for Optimized Dgrad + if IteratorAlgorithm.Optimized in iterator_algorithms: + new_operation = Conv2dOperation(ConvKind.Dgrad, IteratorAlgorithm.Optimized, tile.minimum_compute_capability, tile,\ + A, B, C, element_epilogue, StrideSupport.Strided, epilogue_functor, swizzling_functor_strided_dgrad_) + + manifest.append(new_operation) + operations.append(new_operation) + + # + # Conv2d Wgrad + # + if ConvKind.Wgrad in conv_kinds: + + # Strided support for Analytic and Optimized Wgrad + for iterator_algorithm in iterator_algorithms: + new_operation = Conv2dOperation(ConvKind.Wgrad, iterator_algorithm, tile.minimum_compute_capability, tile,\ + A, B, C, element_epilogue, StrideSupport.Strided, epilogue_functor, swizzling_functor_) + + manifest.append(new_operation) + operations.append(new_operation) + + return operations + +# Convolution for 2D operations specialized for few channels +def CreateConv2dFixedChannelsOperator(manifest, layout, tile_descriptions, data_type, channel_counts, \ + conv_kinds = [ConvKind.Fprop, ConvKind.Dgrad, ConvKind.Wgrad], \ + epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity4): + + element_a, element_b, element_c, element_epilogue = data_type + + # one exceptional case + + # iterator algorithm (analytic and optimized) + iterator_algorithms = [IteratorAlgorithm.FixedChannels,] + + # by default, only generate the largest tile size, largest alignment, and optimized iterator + if manifest.kernel_filter == '': + tile_descriptions = [tile_descriptions[0],] + channel_counts = [channel_counts[0],] + + operations = [] + + + + for tile in tile_descriptions: + for channel_count in channel_counts: + + alignment_c = EpilogueAlignment(channel_count, tile) + + A = TensorDescription(element_a, layout[0], channel_count) + B = TensorDescription(element_b, layout[1], channel_count) + C = TensorDescription(element_c, layout[2], alignment_c) + + swizzling_functor_ = swizzling_functor + + # + # Conv2d Fprop + # + if ConvKind.Fprop in conv_kinds: + + # Strided support for Analytic and Optimized Fprop + for iterator_algorithm in iterator_algorithms: + new_operation = Conv2dOperation(ConvKind.Fprop, iterator_algorithm, tile.minimum_compute_capability, tile,\ + A, B, C, element_epilogue, StrideSupport.Strided, epilogue_functor, swizzling_functor_) + + manifest.append(new_operation) + operations.append(new_operation) + + return operations + +# Convolution for 2D operations specialized for few channels +def CreateConv2dFewChannelsOperator(manifest, layout, tile_descriptions, data_type, channel_counts, \ + conv_kinds = [ConvKind.Fprop, ConvKind.Dgrad, ConvKind.Wgrad], \ + epilogue_functor = 
EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity4): + + element_a, element_b, element_c, element_epilogue = data_type + + # one exceptional case + + # iterator algorithm (analytic and optimized) + iterator_algorithms = [IteratorAlgorithm.FewChannels,] + + # by default, only generate the largest tile size, largest alignment, and optimized iterator + if manifest.kernel_filter == '': + tile_descriptions = [tile_descriptions[0],] + channel_counts = [channel_counts[0],] + + operations = [] + + for tile in tile_descriptions: + for channel_count in channel_counts: + + alignment_c = EpilogueAlignment(channel_count, tile) + + A = TensorDescription(element_a, layout[0], channel_count) + B = TensorDescription(element_b, layout[1], channel_count) + C = TensorDescription(element_c, layout[2], alignment_c) + + swizzling_functor_ = swizzling_functor + + # + # Conv2d Fprop + # + if ConvKind.Fprop in conv_kinds: + + # Strided support for Analytic and Optimized Fprop + for iterator_algorithm in iterator_algorithms: + new_operation = Conv2dOperation(ConvKind.Fprop, iterator_algorithm, tile.minimum_compute_capability, tile,\ + A, B, C, element_epilogue, StrideSupport.Strided, epilogue_functor, swizzling_functor_) + + manifest.append(new_operation) + operations.append(new_operation) + + return operations + +# Convolution for 3D operations +def CreateConv3dOperator(manifest, layout, tile_descriptions, data_type, alignment, \ + conv_kinds = [ConvKind.Fprop, ConvKind.Dgrad, ConvKind.Wgrad], epilogue_functor = EpilogueFunctor.LinearCombination): + + element_a, element_b, element_c, element_epilogue = data_type + + # one exceptional case + alignment_c = min(8, alignment) + + # iterator algorithm (analytic and optimized) + iterator_algorithms = [IteratorAlgorithm.Analytic, IteratorAlgorithm.Optimized] + + # by default, only generate the largest tile size and optimized iterators + if manifest.kernel_filter == '': + tile_descriptions = [tile_descriptions[0],] + iterator_algorithms = [IteratorAlgorithm.Optimized] + + operations = [] + + # All tile sizes for Conv3dFprop and Conv3dWgrad + for tile in tile_descriptions: + A = TensorDescription(element_a, layout, alignment) + B = TensorDescription(element_b, layout, alignment) + C = TensorDescription(element_c, layout, alignment_c) + + # + # Conv3d Fprop + # + if ConvKind.Fprop in conv_kinds: + # Strided support for Analytic and Optimized Fprop + for iterator_algorithm in iterator_algorithms: + new_operation = Conv3dOperation(ConvKind.Fprop, iterator_algorithm, tile.minimum_compute_capability, tile,\ + A, B, C, element_epilogue, StrideSupport.Strided) + manifest.append(new_operation) + operations.append(new_operation) + # + # Conv3d Wgrad + # + if ConvKind.Wgrad in conv_kinds: + + # Strided support for Analytic and Optimized Wgrad + for iterator_algorithm in iterator_algorithms: + new_operation = Conv3dOperation(ConvKind.Wgrad, iterator_algorithm, tile.minimum_compute_capability, tile,\ + A, B, C, element_epilogue, StrideSupport.Strided, epilogue_functor) + manifest.append(new_operation) + operations.append(new_operation) + + # All tile sizes for Conv3dDgrad + for tile in tile_descriptions: + + A = TensorDescription(element_a, layout, alignment) + B = TensorDescription(element_b, layout, alignment) + C = TensorDescription(element_c, layout, alignment_c) + + # + # Conv3d Dgrad + # + if ConvKind.Dgrad in conv_kinds: + # Unity stride for Optimized Dgrad + new_operation = Conv3dOperation(ConvKind.Dgrad, IteratorAlgorithm.Optimized, 
tile.minimum_compute_capability, tile,\ + A, B, C, element_epilogue, StrideSupport.Unity, epilogue_functor) + + manifest.append(new_operation) + operations.append(new_operation) + + # Strided support for Analytic Dgrad + # Conv3dDgrad has a naive strided support which does not cut down redundant MMAs + new_operation = Conv3dOperation(ConvKind.Dgrad, IteratorAlgorithm.Analytic, tile.minimum_compute_capability, tile,\ + A, B, C, element_epilogue, StrideSupport.Strided, epilogue_functor) + + manifest.append(new_operation) + operations.append(new_operation) + + return operations + +# Convolution for Depthwise 2d conv +def CreateDepthwiseConv2dOperator(manifest, layout, tile_descriptions, data_type, alignment_constraints, \ + conv_kinds = [ConvKind.Fprop, ConvKind.Dgrad, ConvKind.Wgrad], \ + epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity4): + + element_a, element_b, element_c, element_epilogue = data_type + + # iterator algorithm (FixedStrideDilation, Optimized) + iterator_algorithms = [IteratorAlgorithm.FixedStrideDilation, IteratorAlgorithm.Optimized] + + # by default, only generate the largest tile size, largest alignment, and optimized iterator + if manifest.kernel_filter == '': + tile_descriptions = [tile_descriptions[0],] + alignment_constraints = [alignment_constraints[0],] + + operations = [] + + for tile in tile_descriptions: + for alignment in alignment_constraints: + + alignment_c = min(8, alignment) + + A = TensorDescription(element_a, layout[0], alignment) + B = TensorDescription(element_b, layout[1], alignment) + C = TensorDescription(element_c, layout[2], alignment_c) + + swizzling_functor_ = swizzling_functor + + if ConvKind.Fprop in conv_kinds: + + # Strided support for Optimized and FixedStridedDilation Depthwise Conv + for iterator_algorithm in iterator_algorithms: + stride_support = StrideSupport.Strided + if iterator_algorithm == IteratorAlgorithm.FixedStrideDilation: + if tile.stride == [-1, -1] or tile.dilation == [-1,-1]: + continue + stride_support = StrideSupport.Fixed + + if iterator_algorithm == IteratorAlgorithm.Optimized: + if tile.stride != [-1, -1] or tile.dilation != [-1,-1]: + continue + new_operation = Conv2dOperation(ConvKind.Fprop, + iterator_algorithm, + tile.minimum_compute_capability, + tile, + A, B, C, + element_epilogue, + stride_support, + epilogue_functor, + swizzling_functor_, + group_mode=GroupMode.Depthwise) + + manifest.append(new_operation) + operations.append(new_operation) + + return operations + +################################################################################################### +################################################################################################### + +# +def GenerateSM50_Simt(manifest, cuda_version): + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + ] + + math_instructions = [ + MathInstruction( \ + [1, 1, 1], \ + DataType.f32, DataType.f32, DataType.f32, \ + OpcodeClass.Simt, \ + MathOperation.multiply_add), + MathInstruction( \ + [1, 1, 1], \ + DataType.f64, DataType.f64, DataType.f64, \ + OpcodeClass.Simt, \ + MathOperation.multiply_add), + ] + + min_cc = 50 + max_cc = 1024 + + alignment_constraints = [1,] + + for math_inst in math_instructions: + tile_descriptions = [ + 
TileDescription([128, 128, 8], 2, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 8], 2, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 128, 8], 2, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 64, 8], 2, [2, 1, 1], math_inst, min_cc, max_cc), + TileDescription([128, 32, 8], 2, [2, 1, 1], math_inst, min_cc, max_cc), + TileDescription([ 32, 128, 8], 2, [1, 2, 1], math_inst, min_cc, max_cc), + ] + + data_type = [ + math_inst.element_a, + math_inst.element_b, + math_inst.element_accumulator, + math_inst.element_accumulator, + ] + + CreateGemmOperator(manifest, layouts, tile_descriptions, \ + data_type, alignment_constraints) + + if math_inst.element_a == DataType.f32: + conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC) + CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints) +# + +# +def GenerateSM50_Simt_complex(manifest, cuda_version): + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + ] + + math_instructions = [ + MathInstruction( \ + [1, 1, 1], \ + DataType.f32, DataType.f32, DataType.f32, \ + OpcodeClass.Simt, \ + MathOperation.multiply_add_complex), + ] + + min_cc = 50 + max_cc = 1024 + + alignment_constraints = [1,] + + for math_inst in math_instructions: + tile_descriptions = [ + TileDescription([128, 64, 8], 2, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 128, 8], 2, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 64, 8], 2, [2, 1, 1], math_inst, min_cc, max_cc), + TileDescription([128, 32, 8], 2, [2, 1, 1], math_inst, min_cc, max_cc), + TileDescription([ 32, 128, 8], 2, [1, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 8], 2, [4, 2, 1], math_inst, min_cc, max_cc), + ] + + data_type = [ + DataType.cf32, + DataType.cf32, + DataType.cf32, + DataType.cf32, + ] + + + CreateGemmOperator(manifest, layouts, tile_descriptions, \ + data_type, alignment_constraints) + + conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC) + CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints) +# + +# +def GenerateSM50(manifest, cuda_version): + GenerateSM50_Simt(manifest, cuda_version) + GenerateSM50_Simt_complex(manifest, cuda_version) + +################################################################################################### +################################################################################################### + +# +def GenerateSM60_Simt(manifest, cuda_version): + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + ] + + math_instructions = [ + MathInstruction( \ + [1, 1, 1], \ + DataType.f16, DataType.f16, DataType.f16, \ + OpcodeClass.Simt, \ + MathOperation.multiply_add), + ] + + min_cc = 60 + max_cc = 1024 + + alignment_constraints = [1,] + + for math_inst in math_instructions: + tile_descriptions = [ + TileDescription([256, 128, 8], 2, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 256, 8], 2, [4, 2, 1], 
math_inst, min_cc, max_cc), + TileDescription([128, 128, 8], 2, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 8], 2, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 128, 8], 2, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 64, 8], 2, [2, 1, 1], math_inst, min_cc, max_cc), + TileDescription([128, 32, 8], 2, [2, 1, 1], math_inst, min_cc, max_cc), + TileDescription([ 32, 128, 8], 2, [1, 2, 1], math_inst, min_cc, max_cc), + ] + + data_type = [ + math_inst.element_a, + math_inst.element_b, + math_inst.element_accumulator, + math_inst.element_accumulator, + ] + + CreateGemmOperator(manifest, layouts, tile_descriptions, \ + data_type, alignment_constraints) +# +def GenerateSM60_Simt_DepthwiseConv2d(manifest, cuda_version): + + math_instructions = [ + MathInstruction( \ + [1, 1, 1], \ + DataType.f16, DataType.f16, DataType.f16, \ + OpcodeClass.Simt, \ + MathOperation.multiply_add), + ] + + min_cc = 60 + max_cc = 1024 + + alignment_constraints = [8,] + + filter_3x3 = [3, 3] + filter_5x5 = [5, 5] + + # [stride_h, stride_w] + # [-1, -1] means all stride size. + strides = [[-1,-1], [1, 1], [2, 2]] + # [dilation_h, dilation_w] + # [-1, -1] means all dilation size. + dilations = [[-1,-1], [1, 1], [2, 2]] + + #groups per thread block + g16 = 16 + g32 = 32 + g64 = 64 + + #output shape per thread block + npq_1x4x4 = [1, 4, 4] + npq_1x8x8 = [1, 8, 8] + npq_1x10x10 = [1, 10, 10] + + tile_descriptions = [] + for math_inst in math_instructions: + for stride, dilation in product(strides, dilations): + tile_descriptions.extend([ + # filter3x3 ThreadBlock_output, filter, stage, warp + Direct2dConvFixedStrideDilationTileDescription(npq_1x8x8+[g32], filter_3x3, 3, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc), + Direct2dConvFixedStrideDilationTileDescription(npq_1x8x8+[g64], filter_3x3, 3, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc), + Direct2dConvFixedStrideDilationTileDescription(npq_1x8x8+[g16], filter_3x3, 3, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc), + + Direct2dConvFixedStrideDilationTileDescription(npq_1x10x10+[g64], filter_3x3, 2, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc), + + Direct2dConvFixedStrideDilationTileDescription(npq_1x4x4+[g32], filter_3x3, 4, stride, dilation, [4, 1, 1], math_inst, min_cc, max_cc), + Direct2dConvFixedStrideDilationTileDescription(npq_1x4x4+[g64], filter_3x3, 4, stride, dilation,[4, 1, 1], math_inst, min_cc, max_cc), + Direct2dConvFixedStrideDilationTileDescription(npq_1x4x4+[g16], filter_3x3, 4, stride, dilation, [4, 1, 1], math_inst, min_cc, max_cc), + + # filter5x5 ThreadBlock_output, filter, stage, warp + Direct2dConvFixedStrideDilationTileDescription(npq_1x8x8+[g32], filter_5x5, 3, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc), + Direct2dConvFixedStrideDilationTileDescription(npq_1x8x8+[g64], filter_5x5, 3, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc), + Direct2dConvFixedStrideDilationTileDescription(npq_1x8x8+[g16], filter_5x5, 3, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc), + + Direct2dConvFixedStrideDilationTileDescription(npq_1x10x10+[g64], filter_5x5, 2, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc), + + Direct2dConvFixedStrideDilationTileDescription(npq_1x4x4+[g32], filter_5x5, 4, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc), + Direct2dConvFixedStrideDilationTileDescription(npq_1x4x4+[g64], filter_5x5, 4, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc), + Direct2dConvFixedStrideDilationTileDescription(npq_1x4x4+[g16], 
filter_5x5, 4, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc) + ]) + + data_type = [ + math_inst.element_a, + math_inst.element_b, + math_inst.element_accumulator, + math_inst.element_accumulator, + ] + + conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC) + CreateDepthwiseConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints) +# + +# +def GenerateSM60(manifest, cuda_version): + GenerateSM60_Simt(manifest, cuda_version) + GenerateSM60_Simt_DepthwiseConv2d(manifest, cuda_version) + +################################################################################################### +################################################################################################### + +# +def GenerateSM61_Simt(manifest, cuda_version): + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + ] + + math_instructions = [ + MathInstruction( \ + [1, 1, 4], \ + DataType.s8, DataType.s8, DataType.s32, \ + OpcodeClass.Simt, \ + MathOperation.multiply_add), + ] + + min_cc = 61 + max_cc = 1024 + + alignment_constraints = [1,] + + for math_inst in math_instructions: + tile_descriptions = [ + TileDescription([128, 128, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 128, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 64, 32], 2, [2, 1, 1], math_inst, min_cc, max_cc), + TileDescription([128, 32, 32], 2, [2, 1, 1], math_inst, min_cc, max_cc), + TileDescription([ 32, 128, 32], 2, [1, 2, 1], math_inst, min_cc, max_cc), + ] + + data_type = [ + math_inst.element_a, + math_inst.element_b, + math_inst.element_accumulator, + math_inst.element_accumulator, + ] + data_type_mixed = [ + math_inst.element_a, + math_inst.element_b, + math_inst.element_a, + math_inst.element_accumulator, + ] + + CreateGemmOperator(manifest, layouts, tile_descriptions, \ + data_type, alignment_constraints) + + CreateGemmOperator(manifest, layouts, tile_descriptions, \ + data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp) +# + +# +def GenerateSM61(manifest, cuda_version): + GenerateSM61_Simt(manifest, cuda_version) + +################################################################################################### +################################################################################################### + +# +def GenerateSM70_TensorOp_884(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 10, 1): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + ] + + math_instructions = [ + MathInstruction( \ + [8, 8, 4], \ + DataType.f16, DataType.f16, DataType.f32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add), + MathInstruction( \ + [8, 8, 4], \ + DataType.f16, DataType.f16, DataType.f16, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add), + ] + + min_cc = 70 + max_cc = 75 + + alignment_constraints = [8, 4, 2, 1] + + for math_inst in math_instructions: + 
tile_descriptions = [ + TileDescription([256, 128, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 256, 32], 2, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([256, 64, 32], 2, [4, 1, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 256, 32], 2, [1, 4, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 128, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc), + ] + + data_type = [ + math_inst.element_a, + math_inst.element_b, + math_inst.element_accumulator, + math_inst.element_accumulator, + ] + + CreateGemmOperator(manifest, layouts, tile_descriptions, \ + data_type, alignment_constraints) + + conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC) + CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints) + + # Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation) + if math_inst.element_a != math_inst.element_accumulator: + + data_type_mixed = [ + math_inst.element_a, + math_inst.element_b, + math_inst.element_a, + math_inst.element_accumulator, + ] + + CreateGemmOperator(manifest, layouts, tile_descriptions, \ + data_type_mixed, alignment_constraints) + + CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type_mixed, alignment_constraints) + +# +def GenerateSM70_PlanarComplexTensorOp_884(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 10, 1): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + ] + + complex_transforms = [ + (ComplexTransform.none, ComplexTransform.none), + (ComplexTransform.conj, ComplexTransform.none), + (ComplexTransform.none, ComplexTransform.conj), + (ComplexTransform.conj, ComplexTransform.conj) + ] + + math_instructions = [ + MathInstruction( \ + [8, 8, 4], \ + DataType.f16, DataType.f16, DataType.f32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add), + MathInstruction( \ + [8, 8, 4], \ + DataType.f16, DataType.f16, DataType.f16, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add), + ] + + min_cc = 70 + max_cc = 75 + + alignment_constraints = [8, 2, 1] + + for math_inst in math_instructions: + tile_descriptions = [ + TileDescription([ 64, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc), + ] + + data_type = [ + math_inst.element_a, + math_inst.element_b, + math_inst.element_accumulator, + math_inst.element_accumulator, + ] + + CreateGemmPlanarComplexOperator(manifest, layouts, tile_descriptions, \ + data_type, alignment_constraints, complex_transforms) + + # Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. 
F16 accumulation) + if math_inst.element_a != math_inst.element_accumulator: + + data_type_mixed = [ + math_inst.element_a, + math_inst.element_b, + math_inst.element_a, + math_inst.element_accumulator, + ] + + CreateGemmPlanarComplexOperator(manifest, layouts, tile_descriptions, \ + data_type_mixed, alignment_constraints, complex_transforms) + + +# +def GenerateSM70_WmmaTensorOp_161616(manifest, cuda_version): + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + ] + + math_instructions = [ + MathInstruction( \ + [16, 16, 16], \ + DataType.f16, DataType.f16, DataType.f32, \ + OpcodeClass.WmmaTensorOp, \ + MathOperation.multiply_add), + MathInstruction( \ + [16, 16, 16], \ + DataType.f16, DataType.f16, DataType.f16, \ + OpcodeClass.WmmaTensorOp, \ + MathOperation.multiply_add), + ] + + min_cc = 70 + max_cc = 1024 + + alignment_constraints = [8,] + + for math_inst in math_instructions: + tile_descriptions = [ + TileDescription([128, 128, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 128, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc), + ] + + data_type = [ + math_inst.element_a, + math_inst.element_b, + math_inst.element_accumulator, + math_inst.element_accumulator, + ] + + CreateGemmOperator(manifest, layouts, tile_descriptions, \ + data_type, alignment_constraints) + + # Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation) + if math_inst.element_a != math_inst.element_accumulator: + + data_type_mixed = [ + math_inst.element_a, + math_inst.element_b, + math_inst.element_a, + math_inst.element_accumulator, + ] + + CreateGemmOperator(manifest, layouts, tile_descriptions, \ + data_type_mixed, alignment_constraints) + +# +################################################################################################## +# + +def GenerateSM70(manifest, cuda_version): + GenerateSM70_TensorOp_884(manifest, cuda_version) + GenerateSM70_PlanarComplexTensorOp_884(manifest, cuda_version) + + # To limit build size, WMMA GEMMs are disabled for now. 
+ # + #GenerateSM70_WmmaTensorOp_161616(manifest, cuda_version) + +################################################################################################### +################################################################################################### + +# +def GenerateSM75_TensorOp_1688_FewChannels(manifest, cuda_version, math_inst): + + min_cc = 75 + max_cc = 1024 + + tile_descriptions = [ + TileDescription([128, 64, 32], 2, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([256, 64, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 256, 32], 2, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 128, 32], 2, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 64, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 128, 64], 2, [2, 2, 2], math_inst, min_cc, max_cc), + ] + + data_type = [ + math_inst.element_a, + math_inst.element_b, + math_inst.element_accumulator, + math_inst.element_accumulator, + ] + + conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC) + + CreateConv2dFixedChannelsOperator(manifest, conv_layout, tile_descriptions, data_type, [4, 8]) + CreateConv2dFewChannelsOperator(manifest, conv_layout, tile_descriptions, data_type, [1, 2, 4]) + + # Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation) + if math_inst.element_a != math_inst.element_accumulator: + + data_type_mixed = [ + math_inst.element_a, + math_inst.element_b, + math_inst.element_a, + math_inst.element_accumulator, + ] + + CreateConv2dFixedChannelsOperator(manifest, conv_layout, tile_descriptions, data_type_mixed, [4, 8]) + CreateConv2dFewChannelsOperator(manifest, conv_layout, tile_descriptions, data_type_mixed, [1, 2, 4]) + +# +def GenerateSM75_TensorOp_1688(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 10, 2): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + ] + + math_instructions = [ + MathInstruction( \ + [16, 8, 8], \ + DataType.f16, DataType.f16, DataType.f32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add), + MathInstruction( \ + [16, 8, 8], \ + DataType.f16, DataType.f16, DataType.f16, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add), + ] + + min_cc = 75 + max_cc = 1024 + + alignment_constraints = [8, 4, 2, 1] + + for math_inst in math_instructions: + tile_descriptions = [ + TileDescription([256, 128, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 256, 32], 2, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 256, 32], 2, [1, 4, 1], math_inst, min_cc, max_cc), + TileDescription([256, 64, 32], 2, [4, 1, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 128, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 128, 64], 2, [1, 2, 2], math_inst, min_cc, max_cc), + ] + + data_type = [ + math_inst.element_a, + math_inst.element_b, + math_inst.element_accumulator, + 
math_inst.element_accumulator, + ] + + CreateGemmOperator(manifest, layouts, tile_descriptions, \ + data_type, alignment_constraints) + + conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC) + + CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints) + + # Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation) + if math_inst.element_a != math_inst.element_accumulator: + + data_type_mixed = [ + math_inst.element_a, + math_inst.element_b, + math_inst.element_a, + math_inst.element_accumulator, + ] + + CreateGemmOperator(manifest, layouts, tile_descriptions, \ + data_type_mixed, alignment_constraints) + + CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type_mixed, alignment_constraints) + + # Separate generator for 'few channels' specializations + GenerateSM75_TensorOp_1688_FewChannels(manifest, cuda_version, math_inst) + +# + +# +def GenerateSM75_PlanarComplexTensorOp_1688(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 10, 2): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + ] + + complex_transforms = [ + (ComplexTransform.none, ComplexTransform.none), + (ComplexTransform.conj, ComplexTransform.none), + (ComplexTransform.none, ComplexTransform.conj), + (ComplexTransform.conj, ComplexTransform.conj) + ] + + math_instructions = [ + MathInstruction( \ + [16, 8, 8], \ + DataType.f16, DataType.f16, DataType.f32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add), + MathInstruction( \ + [16, 8, 8], \ + DataType.f16, DataType.f16, DataType.f16, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add), + ] + + min_cc = 75 + max_cc = 1024 + + alignment_constraints = [8, 2, 1] + + for math_inst in math_instructions: + tile_descriptions = [ + TileDescription([ 64, 128, 32], 2, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc), + ] + + data_type = [ + math_inst.element_a, + math_inst.element_b, + math_inst.element_accumulator, + math_inst.element_accumulator, + ] + + CreateGemmPlanarComplexOperator(manifest, layouts, tile_descriptions, \ + data_type, alignment_constraints, complex_transforms) + + # Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. 
F16 accumulation) + if math_inst.element_a != math_inst.element_accumulator: + + data_type_mixed = [ + math_inst.element_a, + math_inst.element_b, + math_inst.element_a, + math_inst.element_accumulator, + ] + + CreateGemmPlanarComplexOperator(manifest, layouts, tile_descriptions, \ + data_type_mixed, alignment_constraints, complex_transforms) + +# +def GenerateSM75_TensorOp_8816_TN(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 10, 2): + return + + layouts = [ + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + ] + + math_instructions = [ + MathInstruction( \ + [8, 8, 16], \ + DataType.s8, DataType.s8, DataType.s32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_saturate), + MathInstruction( \ + [8, 8, 16], \ + DataType.u8, DataType.u8, DataType.s32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_saturate), + ] + + min_cc = 75 + max_cc = 90 + + alignment_constraints = [16,] + alignment_constraints_small_channels = [16, 8, 4] + + for math_inst in math_instructions: + tile_descriptions = [ + TileDescription([256, 128, 64], 2, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 256, 64], 2, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 64], 2, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 256, 64], 2, [1, 4, 1], math_inst, min_cc, max_cc), + TileDescription([256, 64, 64], 2, [4, 1, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 128, 64], 2, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 64], 2, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 64, 64], 2, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([256, 32, 64], 2, [4, 1, 1], math_inst, min_cc, max_cc), + TileDescription([ 32, 256, 64], 2, [1, 4, 1], math_inst, min_cc, max_cc), + TileDescription([128, 32, 64], 2, [4, 1, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 32, 64], 2, [2, 1, 1], math_inst, min_cc, max_cc), + + TileDescription([256, 128, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 256, 32], 2, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 256, 32], 2, [1, 4, 1], math_inst, min_cc, max_cc), + TileDescription([256, 64, 32], 2, [4, 1, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 128, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 32, 32], 2, [2, 1, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 32, 32], 2, [2, 1, 1], math_inst, min_cc, max_cc), + ] + + data_type = [ + math_inst.element_a, + math_inst.element_b, + math_inst.element_accumulator, + DataType.s32, + ] + + CreateGemmOperator(manifest, layouts, tile_descriptions, \ + data_type, alignment_constraints, None, EpilogueFunctor.LinearCombination) + + conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC) + CreateConv2dOperator(manifest, conv_layout, tile_descriptions, + data_type, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombination) + + # Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. 
F16 accumulation) + if math_inst.element_a != math_inst.element_accumulator: + + data_type_mixed = [ + math_inst.element_a, + math_inst.element_b, + math_inst.element_a, + DataType.f32, + ] + + operations = [] + + operations += CreateGemmOperator(manifest, layouts, tile_descriptions, \ + data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp) + + operations += CreateConv2dOperator(manifest, conv_layout, tile_descriptions, + data_type_mixed, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp) + + operations += CreateConv2dFixedChannelsOperator(manifest, conv_layout, tile_descriptions, + data_type_mixed, alignment_constraints_small_channels, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp) + + operations += CreateConv2dFewChannelsOperator(manifest, conv_layout, tile_descriptions, + data_type_mixed, alignment_constraints_small_channels, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp) + + for op in operations: + if op.tile_description.threadblock_shape[1] >= 128: + op.C.alignment = 16 + else: + op.C.alignment = 8 + +# + +# +def GenerateSM75_TensorOp_8816_Interleaved(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 10, 2): + return + + layouts = [ + (LayoutType.ColumnMajorInterleaved32, LayoutType.RowMajorInterleaved32, LayoutType.ColumnMajorInterleaved32), + ] + + math_instructions = [ + MathInstruction( \ + [8, 8, 16], \ + DataType.s8, DataType.s8, DataType.s32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_saturate), + MathInstruction( \ + [8, 8, 16], \ + DataType.u8, DataType.u8, DataType.s32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_saturate), + ] + + min_cc = 75 + max_cc = 90 + + alignment_constraints = [16,] + + for math_inst in math_instructions: + tile_descriptions = [ + TileDescription([256, 128, 64], 2, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 256, 64], 2, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 64], 2, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([256, 64, 64], 2, [4, 1, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 256, 64], 2, [1, 4, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 128, 64], 2, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 64], 2, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 64, 64], 2, [2, 2, 1], math_inst, min_cc, max_cc), + ] + + data_type_mixed = [ + math_inst.element_a, + math_inst.element_b, + math_inst.element_a, + DataType.f32, + ] + + operations = CreateGemmOperator(manifest, layouts, tile_descriptions, \ + data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp) + + conv_layout = (LayoutType.TensorNC32HW32, LayoutType.TensorC32RSK32, LayoutType.TensorNC32HW32) + + operations += CreateConv2dOperator(manifest, conv_layout, tile_descriptions, + data_type_mixed, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp) + + for op in operations: + op.C.alignment = 8 +# + +# +def GenerateSM75_TensorOp_8832_TN(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 10, 2): + return + + layouts = [ + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + ] + + math_instructions = [ + MathInstruction( \ + [8, 8, 32], \ + DataType.s4, DataType.s4, DataType.s32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_saturate), + MathInstruction( \ + [8, 8, 32], \ + DataType.u4, DataType.u4, DataType.s32, \ + 
OpcodeClass.TensorOp, \ + MathOperation.multiply_add_saturate), + ] + + min_cc = 75 + max_cc = 89 + + alignment_constraints = [32,] + + for math_inst in math_instructions: + tile_descriptions = [ + TileDescription([256, 128, 128], 2, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 256, 128], 2, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 128], 2, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([256, 64, 128], 2, [4, 1, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 256, 128], 2, [1, 4, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 128, 128], 2, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 128], 2, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 64, 128], 2, [2, 2, 1], math_inst, min_cc, max_cc), + ] + + data_type = [ + math_inst.element_a, + math_inst.element_b, + math_inst.element_accumulator, + DataType.s32, + ] + + CreateGemmOperator(manifest, layouts, tile_descriptions, \ + data_type, alignment_constraints, None, EpilogueFunctor.LinearCombination) + + conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC) + CreateConv2dOperator(manifest, conv_layout, tile_descriptions, + data_type, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombination) + + # Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation) + if math_inst.element_a != math_inst.element_accumulator: + + data_type_mixed = [ + math_inst.element_a, + math_inst.element_b, + math_inst.element_a, + DataType.f32, + ] + + operations = [] + + operations += CreateGemmOperator(manifest, layouts, tile_descriptions, \ + data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp) + + operations += CreateConv2dOperator(manifest, conv_layout, tile_descriptions, + data_type_mixed, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp) + + for op in operations: + if op.tile_description.threadblock_shape[1] >= 128: + op.C.alignment = 16 + elif op.tile_description.threadblock_shape[1] == 64: + op.C.alignment = 8 + else: + op.C.alignment = 8 + +# + +# +def GenerateSM75_TensorOp_8832_Interleaved(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 10, 2): + return + + layouts = [ + (LayoutType.ColumnMajorInterleaved64, LayoutType.RowMajorInterleaved64, LayoutType.ColumnMajorInterleaved64), + ] + + math_instructions = [ + MathInstruction( \ + [8, 8, 32], \ + DataType.s4, DataType.s4, DataType.s32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_saturate), + MathInstruction( \ + [8, 8, 32], \ + DataType.u4, DataType.u4, DataType.s32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_saturate), + ] + + min_cc = 75 + max_cc = 89 + + alignment_constraints = [32,] + + for math_inst in math_instructions: + tile_descriptions = [ + TileDescription([256, 128, 128], 2, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 256, 128], 2, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 128], 2, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([256, 64, 128], 2, [4, 1, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 256, 128], 2, [1, 4, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 128, 128], 2, [2, 2, 1], math_inst, min_cc, max_cc), + ] + + # Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. 
F16 accumulation) + if math_inst.element_a != math_inst.element_accumulator: + + data_type_mixed = [ + math_inst.element_a, + math_inst.element_b, + math_inst.element_a, + DataType.f32, + ] + + operations = CreateGemmOperator(manifest, layouts, tile_descriptions, \ + data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp) + + conv_layout = (LayoutType.TensorNC64HW64, LayoutType.TensorC64RSK64, LayoutType.TensorNC64HW64) + + operations += CreateConv2dOperator(manifest, conv_layout, tile_descriptions, + data_type_mixed, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp) + + for op in operations: + op.C.alignment = 16 +# + +# +def GenerateSM75_TensorOp_88128(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 0): + return + + layouts = [ + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + ] + + math_instructions = [ + MathInstruction( \ + [8, 8, 128], \ + DataType.b1, DataType.b1, DataType.s32, \ + OpcodeClass.TensorOp, \ + MathOperation.xor_popc), + ] + + min_cc = 75 + max_cc = { + MathOperation.xor_popc: 89, + MathOperation.and_popc: 90 + } + + alignment_constraints = [128,] + + for math_inst in math_instructions: + tile_descriptions = [ + TileDescription([256, 128, 512], 2, [4, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]), + TileDescription([128, 256, 512], 2, [2, 4, 1], math_inst, min_cc, max_cc[math_inst.math_operation]), + TileDescription([128, 128, 512], 2, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]), + TileDescription([ 64, 256, 512], 2, [1, 4, 1], math_inst, min_cc, max_cc[math_inst.math_operation]), + TileDescription([256, 64, 512], 2, [4, 1, 1], math_inst, min_cc, max_cc[math_inst.math_operation]), + TileDescription([ 64, 128, 512], 2, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]), + TileDescription([128, 64, 512], 2, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]), + TileDescription([ 64, 64, 512], 2, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]), + ] + + data_type = [DataType.b1, DataType.b1, DataType.s32, DataType.s32] + + CreateGemmOperator(manifest, layouts, tile_descriptions, \ + data_type, alignment_constraints) + +# + +# +def GenerateSM75_WmmaTensorOp_161616(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 10, 0): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + ] + + math_instructions = [ + MathInstruction( \ + [16, 16, 16], \ + DataType.s8, DataType.s8, DataType.s32, \ + OpcodeClass.WmmaTensorOp, \ + MathOperation.multiply_add), + ] + + min_cc = 75 + max_cc = 1024 + + alignment_constraints = [16,] + + for math_inst in math_instructions: + tile_descriptions = [ + TileDescription([128, 128, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 128, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc), + ] + + data_type = [ + math_inst.element_a, + math_inst.element_b, + math_inst.element_accumulator, + DataType.f32, + ] + + CreateGemmOperator(manifest, layouts, tile_descriptions, \ + data_type, alignment_constraints) + + # 
Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation) + if math_inst.element_a != math_inst.element_accumulator: + + data_type_mixed = [ + math_inst.element_a, + math_inst.element_b, + math_inst.element_a, + DataType.f32, + ] + + CreateGemmOperator(manifest, layouts, tile_descriptions, \ + data_type_mixed, alignment_constraints) +# + +# +def GenerateSM75_Simt_complex(manifest, cuda_version): + math_instructions = [ + MathInstruction( \ + [1, 1, 1], \ + DataType.f32, DataType.f32, DataType.f32, \ + OpcodeClass.Simt, \ + MathOperation.multiply_add_complex), + ] + + min_cc = 75 + max_cc = 1024 + + alignment_constraints = [1,] + + for math_inst in math_instructions: + tile_descriptions = [ + TileDescription([128, 128, 8], 5, [4, 2, 1], math_inst, min_cc, max_cc) + ] + data_type = [ + DataType.cf32, + DataType.cf32, + DataType.cf32, + DataType.cf32 + ] + + complex_transforms = [ + (ComplexTransform.none, ComplexTransform.none), + (ComplexTransform.conj, ComplexTransform.none), + (ComplexTransform.none, ComplexTransform.conj), + (ComplexTransform.conj, ComplexTransform.conj) + ] + + conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC) + CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints) +# + +def GenerateSM75(manifest, cuda_version): + GenerateSM75_TensorOp_1688(manifest, cuda_version) + GenerateSM75_PlanarComplexTensorOp_1688(manifest, cuda_version) + GenerateSM75_TensorOp_8816_TN(manifest, cuda_version) + GenerateSM75_TensorOp_8816_Interleaved(manifest, cuda_version) + GenerateSM75_TensorOp_8832_TN(manifest, cuda_version) + GenerateSM75_TensorOp_8832_Interleaved(manifest, cuda_version) + GenerateSM75_TensorOp_88128(manifest, cuda_version) + #GenerateSM75_WmmaTensorOp_161616(manifest, cuda_version) + GenerateSM75_Simt_complex(manifest, cuda_version) + + +################################################################################################### +################################################################################################### + +# +def GenerateSM80_TensorOp_16816(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 0): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + ] + + math_instructions = [ + MathInstruction( \ + [16, 8, 16], \ + DataType.f16, DataType.f16, DataType.f32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add), + MathInstruction( \ + [16, 8, 16], \ + DataType.f16, DataType.f16, DataType.f16, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add), + MathInstruction( \ + [16, 8, 16], \ + DataType.bf16, DataType.bf16, DataType.f32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add), + ] + + min_cc = 80 + max_cc = 1024 + + alignment_constraints = [8, 4, 2] + + for math_inst in math_instructions: + tile_descriptions = [ + TileDescription([256, 128, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 256, 32], 3, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([256, 64, 32], 3, [4, 1, 1], math_inst, min_cc, max_cc), + TileDescription([256, 64, 32], 4, [4, 1, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 256, 32], 4, [1, 4, 1], math_inst, min_cc, max_cc), + TileDescription([128, 
128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 32], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 32], 5, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 32], 6, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 128, 32], 6, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 64, 32], 10, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([256, 128, 64], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 256, 64], 3, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([256, 64, 64], 4, [4, 1, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 256, 64], 4, [1, 4, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 64], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([256, 64, 64], 3, [4, 1, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 256, 64], 3, [1, 4, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 64], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 64], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 128, 64], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 64, 64], 5, [2, 2, 1], math_inst, min_cc, max_cc), + ] + + data_type = [ + math_inst.element_a, + math_inst.element_b, + math_inst.element_accumulator, + math_inst.element_accumulator, + ] + + CreateGemmOperator(manifest, layouts, tile_descriptions, \ + data_type, alignment_constraints) + + CreateGemmGroupedOperator(manifest, layouts, tile_descriptions, data_type, alignment_constraints) + + conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC) + CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints) + CreateConv2dFixedChannelsOperator(manifest, conv_layout, tile_descriptions, data_type, [4, 8]) + CreateConv3dOperator(manifest, LayoutType.TensorNDHWC, tile_descriptions, data_type, 8) + + # Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. 
F16 accumulation) + if math_inst.element_a != math_inst.element_accumulator: + + data_type_mixed = [ + math_inst.element_a, + math_inst.element_b, + math_inst.element_a, + math_inst.element_accumulator, + ] + + CreateGemmOperator(manifest, layouts, tile_descriptions, \ + data_type_mixed, alignment_constraints) + + CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type_mixed, alignment_constraints) + CreateConv2dFixedChannelsOperator(manifest, conv_layout, tile_descriptions, data_type_mixed, [4, 8]) + CreateConv3dOperator(manifest, LayoutType.TensorNDHWC, tile_descriptions, data_type_mixed, 8) +# + +# +def GenerateSM80_SparseTensorOp_16832(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 1): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.RowMajor), + (LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.RowMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.RowMajor), + (LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.RowMajor), + ] + + math_instructions = [ + MathInstruction( \ + [16, 8, 32], \ + DataType.f16, DataType.f16, DataType.f32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add), + MathInstruction( \ + [16, 8, 32], \ + DataType.f16, DataType.f16, DataType.f16, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add), + MathInstruction( \ + [16, 8, 32], \ + DataType.bf16, DataType.bf16, DataType.f32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add), + ] + + min_cc = 80 + max_cc = 1024 + + alignment_constraints = [8] + + for math_inst in math_instructions: + tile_descriptions = [ + TileDescription([ 64, 128, 64], 6, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([256, 128, 64], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 256, 64], 3, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 64], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([256, 64, 64], 3, [4, 1, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 256, 64], 4, [1, 4, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 64], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 64, 64], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 128], 3, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([256, 64, 128], 3, [4, 1, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 128], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 128, 128], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 64, 128], 3, [2, 2, 1], math_inst, min_cc, max_cc), + ] + + data_type = [ + math_inst.element_a, + math_inst.element_b, + math_inst.element_accumulator, + math_inst.element_accumulator, + ] + + CreateSparseGemmOperator(manifest, layouts, tile_descriptions, \ + data_type, alignment_constraints) + + # Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. 
F16 accumulation) + if math_inst.element_a != math_inst.element_accumulator: + + data_type_mixed = [ + math_inst.element_a, + math_inst.element_b, + math_inst.element_a, + math_inst.element_accumulator, + ] + + CreateSparseGemmOperator(manifest, layouts, tile_descriptions, \ + data_type_mixed, alignment_constraints) + +# + +# +def GenerateSM80_PlanarComplexTensorOp_16816(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 0): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + ] + + complex_transforms = [ + (ComplexTransform.none, ComplexTransform.none), + (ComplexTransform.conj, ComplexTransform.none), + (ComplexTransform.none, ComplexTransform.conj), + (ComplexTransform.conj, ComplexTransform.conj) + ] + + math_instructions = [ + MathInstruction( \ + [16, 8, 16], \ + DataType.f16, DataType.f16, DataType.f32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add), + MathInstruction( \ + [16, 8, 16], \ + DataType.bf16, DataType.bf16, DataType.f32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add), + MathInstruction( \ + [16, 8, 16], \ + DataType.f16, DataType.f16, DataType.f16, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add), + ] + + min_cc = 80 + max_cc = 1024 + + alignment_constraints = [8, ] + + for math_inst in math_instructions: + tile_descriptions = [ + TileDescription([ 64, 128, 32], 3, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 64, 32], 4, [2, 2, 1], math_inst, min_cc, max_cc), + ] + + data_type = [ + math_inst.element_a, + math_inst.element_b, + math_inst.element_accumulator, + math_inst.element_accumulator, + ] + + CreateGemmPlanarComplexOperator(manifest, layouts, tile_descriptions, \ + data_type, alignment_constraints, complex_transforms) + + # Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. 
F16 accumulation) + if math_inst.element_a != math_inst.element_accumulator: + + data_type_mixed = [ + math_inst.element_a, + math_inst.element_b, + math_inst.element_a, + math_inst.element_accumulator, + ] + + CreateGemmPlanarComplexOperator(manifest, layouts, tile_descriptions, \ + data_type_mixed, alignment_constraints, complex_transforms) + +# +def GenerateSM80_TensorOp_16832_TN(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 0): + return + + layouts = [ + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + ] + + math_instructions = [ + MathInstruction( \ + [16, 8, 32], \ + DataType.s8, DataType.s8, DataType.s32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_saturate), + MathInstruction( \ + [16, 8, 32], \ + DataType.u8, DataType.u8, DataType.s32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_saturate), + ] + + min_cc = 80 + max_cc = 1024 + smem_usage = 164 + + alignment_constraints = [16,] + alignment_constraints_small_channels = [16, 8, 4] + + for math_inst in math_instructions: + tile_descriptions = [ + TileDescription([256, 128, 64], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 256, 64], 3, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([256, 64, 64], 4, [4, 1, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 256, 64], 4, [1, 4, 1], math_inst, min_cc, max_cc), + TileDescription([256, 32, 64], 4, [4, 1, 1], math_inst, min_cc, max_cc), + TileDescription([ 32, 256, 64], 4, [1, 4, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 64], 5, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 64], 6, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 128, 64], 6, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 32, 64], 6, [4, 1, 1], math_inst, min_cc, max_cc), + TileDescription([ 32, 128, 64], 6, [1, 4, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 64, 64], 10, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([256, 128, 128], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 256, 128], 3, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([256, 64, 128], 4, [4, 1, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 256, 128], 4, [1, 4, 1], math_inst, min_cc, max_cc), + TileDescription([256, 32, 128], 4, [4, 1, 1], math_inst, min_cc, max_cc), + TileDescription([ 32, 256, 128], 4, [1, 4, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 128], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 128], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 128, 128], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 32, 128], 4, [4, 1, 1], math_inst, min_cc, max_cc), + TileDescription([ 32, 128, 128], 4, [1, 4, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 64, 128], 5, [2, 2, 1], math_inst, min_cc, max_cc), + ] + + data_type = [math_inst.element_a, math_inst.element_b, math_inst.element_accumulator, DataType.s32] + data_type_mixed = [math_inst.element_a, math_inst.element_b, math_inst.element_a, DataType.f32] + + CreateGemmOperator(manifest, layouts, tile_descriptions, \ + data_type, alignment_constraints, None, EpilogueFunctor.LinearCombination) + + conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC) + CreateConv2dOperator(manifest, conv_layout, tile_descriptions, + data_type, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombination) + + operations = [] + + operations += 
CreateGemmOperator(manifest, layouts, tile_descriptions, \ + data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp) + + operations += CreateConv2dOperator(manifest, conv_layout, tile_descriptions, + data_type_mixed, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp) + + operations += CreateConv2dFixedChannelsOperator(manifest, conv_layout, tile_descriptions, + data_type_mixed, alignment_constraints_small_channels, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp) + + operations += CreateConv2dFewChannelsOperator(manifest, conv_layout, tile_descriptions, + data_type_mixed, alignment_constraints_small_channels, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp) + + for op in operations: + if op.tile_description.threadblock_shape[1] >= 128: + if op.tile_description.threadblock_shape[0] == 32: + op.C.alignment = 8 + else: + op.C.alignment = 16 + else: + op.C.alignment = 8 + +# + +# +def GenerateSM80_SparseTensorOp_16864_TN(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 1): + return + + layouts = [ + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.RowMajor), + ] + + math_inst = \ + MathInstruction( \ + [16, 8, 64], \ + DataType.s8, DataType.s8, DataType.s32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_saturate) + + min_cc = 80 + max_cc = 1024 + + alignment_constraints = [16,] + + tile_descriptions = [ + TileDescription([128, 64, 128], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([256, 128, 128], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 256, 128], 3, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 128], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([256, 64, 128], 3, [4, 1, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 256, 128], 4, [1, 4, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 128, 128], 6, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 64, 128], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 256], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 256], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 128, 256], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 64, 256], 3, [2, 2, 1], math_inst, min_cc, max_cc), + ] + + data_type = [DataType.s8, DataType.s8, DataType.s32, DataType.s32] + data_type_mixed = [DataType.s8, DataType.s8, DataType.s8, DataType.f32] + + CreateSparseGemmOperator(manifest, layouts, tile_descriptions, \ + data_type, alignment_constraints, None, EpilogueFunctor.LinearCombination) + + operations = [] + + operations += CreateSparseGemmOperator(manifest, layouts, tile_descriptions, \ + data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp) + + for op in operations: + if op.tile_description.threadblock_shape[1] >= 128: + op.C.alignment = 16 + else: + op.C.alignment = 8 +# + +# +def GenerateSM80_TensorOp_16832_Interleaved(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 0): + return + + layouts = [ + (LayoutType.ColumnMajorInterleaved32, LayoutType.RowMajorInterleaved32, LayoutType.ColumnMajorInterleaved32), + ] + + math_instructions = [ + MathInstruction( \ + [16, 8, 32], \ + DataType.s8, DataType.s8, DataType.s32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_saturate), + MathInstruction( \ + [16, 8, 32], \ + DataType.u8, DataType.u8, DataType.s32, \ + OpcodeClass.TensorOp, \ + 
MathOperation.multiply_add_saturate), + ] + + min_cc = 80 + max_cc = 1024 + + alignment_constraints = [16,] + + for math_inst in math_instructions: + tile_descriptions = [ + TileDescription([256, 128, 64], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 256, 64], 3, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([256, 64, 64], 4, [4, 1, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 256, 64], 4, [1, 4, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 64], 5, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 64], 6, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 128, 64], 6, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 64, 64], 10, [2, 2, 1], math_inst, min_cc, max_cc), + ] + + data_type_mixed = [math_inst.element_a, math_inst.element_b, math_inst.element_a, DataType.f32] + + operations = CreateGemmOperator(manifest, layouts, tile_descriptions, \ + data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp) + + conv_layout = (LayoutType.TensorNC32HW32, LayoutType.TensorC32RSK32, LayoutType.TensorNC32HW32) + + operations += CreateConv2dOperator(manifest, conv_layout, tile_descriptions, + data_type_mixed, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp) + + for op in operations: + op.C.alignment = 8 +# + +# +def GenerateSM80_TensorOp_16864_TN(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 0): + return + + layouts = [ + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + ] + + math_instructions = [ + MathInstruction( \ + [16, 8, 64], \ + DataType.s4, DataType.s4, DataType.s32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_saturate), + MathInstruction( \ + [16, 8, 64], \ + DataType.u4, DataType.u4, DataType.s32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_saturate), + ] + + min_cc = 80 + max_cc = 1024 + alignment_constraints = [32,] + + for math_inst in math_instructions: + tile_descriptions = [ + TileDescription([256, 128, 128], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 256, 128], 3, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([256, 64, 128], 4, [4, 1, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 256, 128], 4, [1, 4, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 128], 5, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 128], 6, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 128, 128], 6, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 64, 128], 10, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([256, 128, 256], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 256, 256], 3, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([256, 64, 256], 4, [4, 1, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 256, 256], 4, [1, 4, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 256], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 256], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 256], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 128, 256], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 64, 256], 5, [2, 2, 1], math_inst, min_cc, max_cc), + ] + + data_type = [math_inst.element_a, math_inst.element_b, math_inst.element_accumulator, DataType.s32] + data_type_mixed = [math_inst.element_a, math_inst.element_b, 
math_inst.element_a, DataType.f32] + + CreateGemmOperator(manifest, layouts, tile_descriptions, \ + data_type, alignment_constraints, None, EpilogueFunctor.LinearCombination) + + operations = [] + + operations += CreateGemmOperator(manifest, layouts, tile_descriptions, \ + data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp) + + conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC) + CreateConv2dOperator(manifest, conv_layout, tile_descriptions, + data_type, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombination) + + operations += CreateConv2dOperator(manifest, conv_layout, tile_descriptions, + data_type_mixed, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp) + + for op in operations: + if op.tile_description.threadblock_shape[1] >= 128: + op.C.alignment = 16 + elif op.tile_description.threadblock_shape[1] == 64: + op.C.alignment = 8 + else: + op.C.alignment = 8 +# + +# +def GenerateSM80_SparseTensorOp_168128_TN(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 1): + return + + layouts = [ + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.RowMajor), + ] + + math_inst = \ + MathInstruction( \ + [16, 8, 128], \ + DataType.s4, DataType.s4, DataType.s32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_saturate) + + min_cc = 80 + max_cc = 1024 + alignment_constraints = [32,] + + tile_descriptions = [ + TileDescription([ 64, 64, 256], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([256, 64, 256], 3, [4, 1, 1], math_inst, min_cc, max_cc), + TileDescription([256, 128, 256], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 256, 256], 3, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 256], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 256, 256], 4, [1, 4, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 256], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 128, 256], 6, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 512], 3, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 512], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 128, 512], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 64, 512], 3, [2, 2, 1], math_inst, min_cc, max_cc), + ] + + data_type = [DataType.s4, DataType.s4, DataType.s32, DataType.s32] + data_type_mixed = [DataType.s4, DataType.s4, DataType.s4, DataType.f32] + + CreateSparseGemmOperator(manifest, layouts, tile_descriptions, \ + data_type, alignment_constraints, None, EpilogueFunctor.LinearCombination) + + operations = [] + + operations += CreateSparseGemmOperator(manifest, layouts, tile_descriptions, \ + data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp) + + for op in operations: + if op.tile_description.threadblock_shape[1] > 128: + op.C.alignment = 16 + else: + op.C.alignment = 8 +# + +# +def GenerateSM80_TensorOp_16864_Interleaved(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 0): + return + + layouts = [ + (LayoutType.ColumnMajorInterleaved64, LayoutType.RowMajorInterleaved64, LayoutType.ColumnMajorInterleaved64), + ] + + math_instructions = [ + MathInstruction( \ + [16, 8, 64], \ + DataType.s4, DataType.s4, DataType.s32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_saturate), + MathInstruction( \ + [16, 8, 64], \ + DataType.u4, DataType.u4, DataType.s32, \ + 
OpcodeClass.TensorOp, \ + MathOperation.multiply_add_saturate), + ] + + min_cc = 80 + max_cc = 1024 + alignment_constraints = [32,] + + for math_inst in math_instructions: + tile_descriptions = [ + TileDescription([256, 128, 128], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 256, 128], 3, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([256, 64, 128], 4, [4, 1, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 256, 128], 4, [1, 4, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 128], 5, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 128, 128], 6, [2, 2, 1], math_inst, min_cc, max_cc), + ] + + data_type_mixed = [math_inst.element_a, math_inst.element_b, math_inst.element_a, DataType.f32] + + operations = [] + + operations += CreateGemmOperator(manifest, layouts, tile_descriptions, \ + data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp) + + conv_layout = (LayoutType.TensorNC64HW64, LayoutType.TensorC64RSK64, LayoutType.TensorNC64HW64) + + operations += CreateConv2dOperator(manifest, conv_layout, tile_descriptions, + data_type_mixed, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp) + + for op in operations: + op.C.alignment = 16 +# + +# +def GenerateSM80_TensorOp_168256(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 0): + return + + layouts = [ + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + ] + + math_instructions = [ + MathInstruction( \ + [16, 8, 256], \ + DataType.b1, DataType.b1, DataType.s32, \ + OpcodeClass.TensorOp, \ + MathOperation.xor_popc), + MathInstruction( \ + [16, 8, 256], \ + DataType.b1, DataType.b1, DataType.s32, \ + OpcodeClass.TensorOp, \ + MathOperation.and_popc), + ] + + min_cc = 80 + max_cc = { + MathOperation.xor_popc: 89, + MathOperation.and_popc: 90 + } + + alignment_constraints = [128,] + + for math_inst in math_instructions: + tile_descriptions = [ + TileDescription([256, 128, 512], 3, [4, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]), + TileDescription([128, 256, 512], 3, [2, 4, 1], math_inst, min_cc, max_cc[math_inst.math_operation]), + TileDescription([256, 64, 512], 4, [4, 1, 1], math_inst, min_cc, max_cc[math_inst.math_operation]), + TileDescription([ 64, 256, 512], 4, [1, 4, 1], math_inst, min_cc, max_cc[math_inst.math_operation]), + TileDescription([128, 128, 512], 5, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]), + TileDescription([128, 64, 512], 6, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]), + TileDescription([ 64, 128, 512], 6, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]), + TileDescription([ 64, 64, 512], 10, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]), + TileDescription([256, 128, 1024], 3, [4, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]), + TileDescription([128, 256, 1024], 3, [2, 4, 1], math_inst, min_cc, max_cc[math_inst.math_operation]), + TileDescription([256, 64, 1024], 4, [4, 1, 1], math_inst, min_cc, max_cc[math_inst.math_operation]), + TileDescription([ 64, 256, 1024], 4, [1, 4, 1], math_inst, min_cc, max_cc[math_inst.math_operation]), + TileDescription([128, 128, 1024], 4, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]), + TileDescription([128, 64, 1024], 3, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]), + TileDescription([ 64, 128, 1024], 3, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]), + 
TileDescription([ 64, 64, 1024], 5, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]), + ] + + data_type = [DataType.b1, DataType.b1, DataType.s32, DataType.s32] + + CreateGemmOperator(manifest, layouts, tile_descriptions, \ + data_type, alignment_constraints) + +# + +# +def GenerateSM80_TensorOp_1688(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 0): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + ] + + math_instructions = [ + MathInstruction( \ + [16, 8, 8], \ + DataType.tf32, DataType.tf32, DataType.f32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add) + ] + + min_cc = 80 + max_cc = 1024 + + alignment_constraints = [4, 2, 1] + + for math_inst in math_instructions: + tile_descriptions = [ + TileDescription([256, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 256, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([256, 64, 16], 4, [4, 1, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 256, 16], 4, [1, 4, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 128, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 64, 16], 10, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([256, 128, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 256, 32], 3, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([256, 64, 32], 4, [4, 1, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 256, 32], 4, [1, 4, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 32], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 64, 32], 5, [2, 2, 1], math_inst, min_cc, max_cc), + ] + + data_type = [ + math_inst.element_a, + math_inst.element_b, + math_inst.element_accumulator, + math_inst.element_accumulator, + ] + + data_type_mixed = [ + math_inst.element_a, + math_inst.element_b, + math_inst.element_a, + math_inst.element_accumulator, + ] + + CreateGemmOperator(manifest, layouts, tile_descriptions, \ + data_type, alignment_constraints) + + CreateGemmOperator(manifest, layouts, tile_descriptions, \ + data_type_mixed, alignment_constraints) + + conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC) + + CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints) + CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type_mixed, alignment_constraints) +# + +# +def GenerateSM80_TensorOp_1688_fast_math(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 0): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + 
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + ] + + math_instructions = [ + MathInstruction( \ + [16, 8, 8], \ + DataType.tf32, DataType.tf32, DataType.f32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add), + MathInstruction( \ + [16, 8, 8], \ + DataType.f16, DataType.f16, DataType.f32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_fast_f16), + MathInstruction( \ + [16, 8, 8], \ + DataType.bf16, DataType.bf16, DataType.f32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_fast_bf16), + ] + + min_cc = 80 + max_cc = 1024 + + alignment_constraints = [4, 2, 1] + + for math_inst in math_instructions: + tile_descriptions = [ + TileDescription([256, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 256, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([256, 64, 16], 4, [4, 1, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 256, 16], 4, [1, 4, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 128, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 64, 16], 10, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([256, 128, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 256, 32], 3, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([256, 64, 32], 4, [4, 1, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 256, 32], 4, [1, 4, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 32], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 64, 32], 5, [2, 2, 1], math_inst, min_cc, max_cc), + ] + + data_type = [DataType.f32, DataType.f32, DataType.f32, DataType.f32] + + CreateGemmOperator(manifest, layouts, tile_descriptions, \ + data_type, alignment_constraints) + + conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC) + CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints) +# + +# +def GenerateSM80_TensorOp_1688_fast_fp32_math(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 0): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + ] + + math_instructions = [ + MathInstruction( \ + [16, 8, 8], \ + DataType.f32, DataType.f32, DataType.f32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_fast_f32), + ] + + min_cc = 80 + max_cc = 1024 + + alignment_constraints = [4, 2, 1] + + for math_inst in math_instructions: + tile_descriptions = [ + TileDescription([128, 128, 16], 4, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([256, 64, 16], 3, [4, 2, 1], math_inst, min_cc, 
max_cc), + TileDescription([ 64, 256, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 128, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([256, 64, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 256, 32], 3, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 64, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc), + ] + + data_type = [DataType.f32, DataType.f32, DataType.f32, DataType.f32] + + CreateGemmOperator(manifest, layouts, tile_descriptions, \ + data_type, alignment_constraints) + + conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC) + CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints) +# + +def GenerateSM80_TensorOp_1688_fast_fp32_math_complex(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 0): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + ] + + math_inst = MathInstruction( \ + [16, 8, 8], \ + DataType.f32, DataType.f32, DataType.f32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_complex_fast_f32) + + min_cc = 80 + max_cc = 1024 + + tile_descriptions = [ + TileDescription([128, 64, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 128, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 32, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc), + ] + + data_type = [ + DataType.cf32, DataType.cf32, DataType.cf32, DataType.cf32 + ] + + alignment_constraints = [1,] + + complex_transforms = [ + (ComplexTransform.none, ComplexTransform.none), + (ComplexTransform.conj, ComplexTransform.none), + (ComplexTransform.none, ComplexTransform.conj), + (ComplexTransform.conj, ComplexTransform.conj) + ] + + CreateGemmOperator(manifest, layouts, tile_descriptions, \ + data_type, alignment_constraints, complex_transforms) + + +# +def GenerateSM80_SparseTensorOp_16816_fast_math(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 1): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.RowMajor), + (LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.RowMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.RowMajor), + (LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.RowMajor), + ] + + math_instructions = [ + MathInstruction( \ + [16, 8, 16], \ + DataType.tf32, DataType.tf32, DataType.f32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add), + ] + + min_cc = 80 + max_cc = 1024 + + alignment_constraints = [4] + + for math_inst in math_instructions: + tile_descriptions = [ + TileDescription([128, 64, 32], 3, [2, 2, 1], math_inst, min_cc, 
max_cc), + TileDescription([128, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([256, 128, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 256, 32], 3, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([256, 64, 32], 3, [4, 1, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 256, 32], 4, [1, 4, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 128, 32], 6, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 64, 32], 6, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 64], 3, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([256, 64, 64], 3, [4, 1, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 64], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 128, 64], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 64, 64], 3, [2, 2, 1], math_inst, min_cc, max_cc), + ] + + data_type = [DataType.f32, DataType.f32, DataType.f32, DataType.f32] + + CreateSparseGemmOperator(manifest, layouts, tile_descriptions, \ + data_type, alignment_constraints) +# + +# +def GenerateSM80_TensorOp_1688_complex(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 0): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + ] + + math_inst = MathInstruction( \ + [16, 8, 8], \ + DataType.tf32, DataType.tf32, DataType.f32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_complex) + + min_cc = 80 + max_cc = 1024 + + tile_descriptions = [ + TileDescription([128, 128, 16], 4, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 16], 4, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 128, 16], 4, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 32, 16], 4, [2, 1, 1], math_inst, min_cc, max_cc), + TileDescription([32, 64, 16], 4, [1, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), + ] + + data_type = [ + DataType.cf32, DataType.cf32, DataType.cf32, DataType.cf32 + ] + + alignment_constraints = [1,] + + complex_transforms = [ + (ComplexTransform.none, ComplexTransform.none), + (ComplexTransform.conj, ComplexTransform.none), + (ComplexTransform.none, ComplexTransform.conj), + (ComplexTransform.conj, ComplexTransform.conj) + ] + + CreateGemmOperator(manifest, layouts, tile_descriptions, \ + data_type, alignment_constraints, complex_transforms) +# + +# +def GenerateSM80_TensorOp_1688_rank_k(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 0): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor), + ] + + fill_modes = [ + FillMode.Lower, FillMode.Upper, + ] + + math_instructions = [ + MathInstruction( \ + [16, 8, 8], \ + DataType.tf32, DataType.tf32, DataType.f32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add), + MathInstruction( \ + [16, 8, 8], \ + DataType.f32, DataType.f32, DataType.f32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_fast_f32), + ] + + min_cc = 80 + max_cc = 1024 + + alignment_constraints = [1, 2, 4] # Alignment only applies to A in SYRK + + for math_inst in math_instructions: 
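+ # Rank-k update (SYRK) tile sizes. Each TileDescription lists the threadblock tile [M, N, K], the number of pipeline stages, the warp count per threadblock, the math instruction, and the compute-capability range (min_cc, max_cc) the kernel supports.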
+ tile_descriptions = [ + TileDescription([256, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 256, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc), + #TileDescription([256, 64, 16], 4, [4, 1, 1], math_inst, min_cc, max_cc), + #TileDescription([ 64, 256, 16], 4, [1, 4, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([128, 64, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([ 64, 128, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([ 64, 64, 16], 10, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([256, 128, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 256, 32], 3, [2, 4, 1], math_inst, min_cc, max_cc), + #TileDescription([256, 64, 32], 4, [4, 1, 1], math_inst, min_cc, max_cc), + #TileDescription([ 64, 256, 32], 4, [1, 4, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 32], 4, [2, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([128, 64, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([ 64, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([ 64, 64, 32], 5, [2, 2, 1], math_inst, min_cc, max_cc), + ] + + data_type = [DataType.f32, DataType.f32, DataType.f32] + + CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \ + data_type, alignment_constraints, BlasMode.symmetric) +# + +# +def GenerateSM80_TensorOp_1688_rank_k_complex(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 0): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor), + ] + + fill_modes = [ + FillMode.Lower, FillMode.Upper, + ] + + math_instructions = [ + MathInstruction( \ + [16, 8, 8], \ + DataType.tf32, DataType.tf32, DataType.f32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_complex), + MathInstruction( \ + [16, 8, 8], \ + DataType.f32, DataType.f32, DataType.f32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_complex_fast_f32), + ] + + min_cc = 80 + max_cc = 1024 + + for math_inst in math_instructions: + tile_descriptions = [ + TileDescription([128, 64, 16], 4, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 128, 16], 4, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([64, 32, 16], 4, [2, 1, 1], math_inst, min_cc, max_cc), + #TileDescription([32, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), + ] + + data_type = [ + DataType.cf32, DataType.cf32, DataType.cf32 + ] + + alignment_constraints = [1,] + + # SYRK + CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \ + data_type, alignment_constraints, BlasMode.symmetric) + + # HERK + CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \ + data_type, alignment_constraints, BlasMode.hermitian) +# + +# +def GenerateSM80_TensorOp_1688_trmm(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 0): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + ] + + side_modes = [ + SideMode.Left, SideMode.Right, + ] + + fill_modes = [ + FillMode.Lower, FillMode.Upper, + ] + + diag_types = [ + DiagType.NonUnit, DiagType.Unit, + ] + + math_instructions = [ + MathInstruction( \ + [16, 8, 8], \ + DataType.tf32, DataType.tf32, DataType.f32, \ + 
OpcodeClass.TensorOp, \ + MathOperation.multiply_add), + MathInstruction( \ + [16, 8, 8], \ + DataType.f32, DataType.f32, DataType.f32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_fast_f32), + ] + + min_cc = 80 + max_cc = 1024 + + alignment_constraints = [1, 2, 4] + + for math_inst in math_instructions: + tile_descriptions = [ + TileDescription([256, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 256, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([256, 64, 16], 4, [4, 1, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 256, 16], 4, [1, 4, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([ 64, 128, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 64, 16], 10, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([256, 128, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 256, 32], 3, [2, 4, 1], math_inst, min_cc, max_cc), + #TileDescription([256, 64, 32], 4, [4, 1, 1], math_inst, min_cc, max_cc), + #TileDescription([ 64, 256, 32], 4, [1, 4, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 32], 4, [2, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([128, 64, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([ 64, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([ 64, 64, 32], 5, [2, 2, 1], math_inst, min_cc, max_cc), + ] + + data_type = [DataType.f32, DataType.f32, DataType.f32, DataType.f32] + + CreateTrmmOperator(manifest, layouts, side_modes, fill_modes, diag_types, tile_descriptions, \ + data_type, alignment_constraints) +# + +# +def GenerateSM80_TensorOp_1688_trmm_complex(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 0): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + ] + + side_modes = [ + SideMode.Left, SideMode.Right, + ] + + fill_modes = [ + FillMode.Lower, FillMode.Upper, + ] + + diag_types = [ + DiagType.NonUnit, DiagType.Unit, + ] + + math_instructions = [ + MathInstruction( \ + [16, 8, 8], \ + DataType.tf32, DataType.tf32, DataType.f32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_complex), + MathInstruction( \ + [16, 8, 8], \ + DataType.f32, DataType.f32, DataType.f32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_complex_fast_f32), + ] + + min_cc = 80 + max_cc = 1024 + + for math_inst in math_instructions: + tile_descriptions = [ + TileDescription([128, 64, 16], 4, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 128, 16], 4, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 32, 16], 4, [2, 1, 1], math_inst, min_cc, max_cc), + TileDescription([32, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), + ] + + data_type = [ + DataType.cf32, DataType.cf32, DataType.cf32, DataType.cf32 + ] + + alignment_constraints = [1,] + + complex_transforms = [ + ComplexTransform.none, ComplexTransform.conj, + ] + + CreateTrmmOperator(manifest, layouts, side_modes, fill_modes, diag_types, tile_descriptions, \ + data_type, alignment_constraints, complex_transforms) +# + +# +def GenerateSM80_TensorOp_1688_symm(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 0): + 
return + + # A and B have same layouts + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor), + ] + + side_modes = [ + SideMode.Left, SideMode.Right, + ] + + fill_modes = [ + FillMode.Lower, FillMode.Upper, + ] + + math_instructions = [ + MathInstruction( \ + [16, 8, 8], \ + DataType.tf32, DataType.tf32, DataType.f32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add), + MathInstruction( \ + [16, 8, 8], \ + DataType.f32, DataType.f32, DataType.f32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_fast_f32), + ] + + min_cc = 80 + max_cc = 1024 + + alignment_constraints = [ + 1, 2, 4 + ] + + for math_inst in math_instructions: + tile_descriptions = [ + TileDescription([256, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 256, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc), + #TileDescription([256, 64, 16], 4, [4, 1, 1], math_inst, min_cc, max_cc), + #TileDescription([ 64, 256, 16], 4, [1, 4, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([128, 64, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([ 64, 128, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([ 64, 64, 16], 10, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([256, 128, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 256, 32], 3, [2, 4, 1], math_inst, min_cc, max_cc), + #TileDescription([256, 64, 32], 4, [4, 1, 1], math_inst, min_cc, max_cc), + #TileDescription([ 64, 256, 32], 4, [1, 4, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 32], 4, [2, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([128, 64, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([ 64, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([ 64, 64, 32], 5, [2, 2, 1], math_inst, min_cc, max_cc), + ] + + data_type = [DataType.f32, DataType.f32, DataType.f32, DataType.f32] + + CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \ + data_type, alignment_constraints, BlasMode.symmetric) +# + +# +def GenerateSM80_TensorOp_1688_symm_complex(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 0): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor), + ] + + side_modes = [ + SideMode.Left, SideMode.Right, + ] + + fill_modes = [ + FillMode.Lower, FillMode.Upper, + ] + + math_instructions = [ + MathInstruction( \ + [16, 8, 8], \ + DataType.tf32, DataType.tf32, DataType.f32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_complex), + MathInstruction( \ + [16, 8, 8], \ + DataType.f32, DataType.f32, DataType.f32, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_complex_fast_f32), + ] + + min_cc = 80 + max_cc = 1024 + + for math_inst in math_instructions: + tile_descriptions = [ + TileDescription([128, 64, 16], 4, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 128, 16], 4, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([64, 32, 16], 4, [2, 1, 1], math_inst, min_cc, max_cc), + #TileDescription([32, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), + ] + + data_type = [ + DataType.cf32, DataType.cf32, DataType.cf32, DataType.cf32 + ] + + alignment_constraints = [1,] + + # SYMM + CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \ + data_type, alignment_constraints, BlasMode.symmetric) + + # HEMM + 
CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \ + data_type, alignment_constraints, BlasMode.hermitian) +# + +# +def GenerateSM80_TensorOp_884(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 0): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + ] + + math_inst = \ + MathInstruction( \ + [8, 8, 4], \ + DataType.f64, DataType.f64, DataType.f64, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add) + + min_cc = 80 + max_cc = 1024 + + alignment_constraints = [1,] + + tile_descriptions = [ + TileDescription([128, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([256, 64, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 256, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([256, 32, 16], 3, [4, 1, 1], math_inst, min_cc, max_cc), + TileDescription([32, 256, 16], 3, [1, 4, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 32, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([16, 32, 16], 5, [1, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 16, 16], 5, [2, 1, 1], math_inst, min_cc, max_cc), + ] + + data_type = [DataType.f64, DataType.f64, DataType.f64, DataType.f64] + + CreateGemmOperator(manifest, layouts, tile_descriptions, \ + data_type, alignment_constraints) +# + +# +def GenerateSM80_TensorOp_884_complex(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 0): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + ] + + math_inst = \ + MathInstruction( \ + [8, 8, 4], \ + DataType.f64, DataType.f64, DataType.f64, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_complex) + + min_cc = 80 + max_cc = 1024 + + alignment_constraints = [1,] + + tile_descriptions = [ + TileDescription([128, 64, 8 ], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 128, 8 ], 3, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([64, 64, 8 ], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 32, 8 ], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 64, 8 ], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 32, 8 ], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([16, 32, 8 ], 4, [1, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 16, 8 ], 4, [2, 1, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 128, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([64, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 32, 16], 3, [2, 2, 1], 
math_inst, min_cc, max_cc), + TileDescription([32, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([16, 32, 16], 4, [1, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 16, 16], 3, [2, 1, 1], math_inst, min_cc, max_cc), + ] + + data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64] + + complex_transforms = [ + (ComplexTransform.none, ComplexTransform.none), + (ComplexTransform.conj, ComplexTransform.none), + (ComplexTransform.none, ComplexTransform.conj), + (ComplexTransform.conj, ComplexTransform.conj) + ] + + CreateGemmOperator(manifest, layouts, tile_descriptions, \ + data_type, alignment_constraints, complex_transforms) + +# +def GenerateSM80_TensorOp_884_complex_gaussian(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 0): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + ] + + math_inst = \ + MathInstruction( \ + [8, 8, 4], \ + DataType.f64, DataType.f64, DataType.f64, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_complex_gaussian) + + min_cc = 80 + max_cc = 1024 + + alignment_constraints = [1,] + + tile_descriptions = [ + TileDescription([64, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([16, 32, 8], 4, [1, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 16, 8], 4, [2, 1, 1], math_inst, min_cc, max_cc), + ] + + data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64] + + complex_transforms = [ + (ComplexTransform.none, ComplexTransform.none), + (ComplexTransform.conj, ComplexTransform.none), + (ComplexTransform.none, ComplexTransform.conj), + (ComplexTransform.conj, ComplexTransform.conj) + ] + + CreateGemmOperator(manifest, layouts, tile_descriptions, \ + data_type, alignment_constraints, complex_transforms) +# + +# +def GenerateSM80_TensorOp_884_rank_k(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 0): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor), + ] + + fill_modes = [ + FillMode.Lower, FillMode.Upper, + ] + + math_inst = \ + MathInstruction( \ + [8, 8, 4], \ + DataType.f64, DataType.f64, DataType.f64, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add) + + min_cc = 80 + max_cc = 1024 + + alignment_constraints = [1,] + + tile_descriptions = [ + TileDescription([128, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 32, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([16, 32, 16], 5, [1, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 16, 16], 5, [2, 1, 1], 
math_inst, min_cc, max_cc), + ] + + data_type = [DataType.f64, DataType.f64, DataType.f64] + + CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \ + data_type, alignment_constraints, BlasMode.symmetric) +# + +# +def GenerateSM80_TensorOp_884_rank_k_complex(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 0): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor), + ] + + fill_modes = [ + FillMode.Lower, FillMode.Upper, + ] + + math_inst = \ + MathInstruction( \ + [8, 8, 4], \ + DataType.f64, DataType.f64, DataType.f64, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_complex) + + min_cc = 80 + max_cc = 1024 + + alignment_constraints = [1,] + + tile_descriptions = [ + TileDescription([128, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 128, 8], 3, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([64, 64, 8], 3, [2, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([32, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([16, 32, 8], 4, [1, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([32, 16, 8], 4, [2, 1, 1], math_inst, min_cc, max_cc), + ] + + data_type = [DataType.cf64, DataType.cf64, DataType.cf64] + + # SYRK computation + CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \ + data_type, alignment_constraints, BlasMode.symmetric) + + # HERK computation + CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \ + data_type, alignment_constraints, BlasMode.hermitian) + +# + +# +def GenerateSM80_TensorOp_884_rank_k_complex_gaussian(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 0): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor), + ] + + fill_modes = [ + FillMode.Lower, FillMode.Upper, + ] + + math_inst = \ + MathInstruction( \ + [8, 8, 4], \ + DataType.f64, DataType.f64, DataType.f64, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_complex_gaussian) + + min_cc = 80 + max_cc = 1024 + + alignment_constraints = [1,] + + tile_descriptions = [ + TileDescription([64, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([32, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([16, 32, 8], 4, [1, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([32, 16, 8], 4, [2, 1, 1], math_inst, min_cc, max_cc), + ] + + data_type = [DataType.cf64, DataType.cf64, DataType.cf64] + + complex_transforms = [ComplexTransform.none,] + + # SYRK computation + CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \ + data_type, alignment_constraints, BlasMode.symmetric) + + # HERK computation + CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \ + data_type, alignment_constraints, BlasMode.hermitian) +# + +# +def GenerateSM80_TensorOp_884_trmm(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 0): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + ] + + 
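+ # TRMM kernels are instantiated for every combination of operand side (Left/Right), triangular fill (Lower/Upper), and diagonal type (NonUnit/Unit) enumerated below.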
side_modes = [ + SideMode.Left, SideMode.Right, + ] + + fill_modes = [ + FillMode.Lower, FillMode.Upper, + ] + + diag_types = [ + DiagType.NonUnit, DiagType.Unit, + ] + + math_inst = \ + MathInstruction( \ + [8, 8, 4], \ + DataType.f64, DataType.f64, DataType.f64, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add) + + min_cc = 80 + max_cc = 1024 + + alignment_constraints = [1,] + + tile_descriptions = [ + TileDescription([128, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), + ] + + data_type = [DataType.f64, DataType.f64, DataType.f64, DataType.f64] + + CreateTrmmOperator(manifest, layouts, side_modes, fill_modes, diag_types, tile_descriptions, \ + data_type, alignment_constraints) +# + +# +def GenerateSM80_TensorOp_884_trmm_complex(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 0): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + ] + + side_modes = [ + SideMode.Left, SideMode.Right, + ] + + fill_modes = [ + FillMode.Lower, FillMode.Upper, + ] + + diag_types = [ + DiagType.NonUnit, DiagType.Unit, + ] + + math_inst = \ + MathInstruction( \ + [8, 8, 4], \ + DataType.f64, DataType.f64, DataType.f64, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_complex) + + min_cc = 80 + max_cc = 1024 + + alignment_constraints = [1,] + + tile_descriptions = [ + TileDescription([128, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 128, 8], 3, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([64, 64, 8], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc), + ] + + data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64] + + complex_transforms = [ + ComplexTransform.none, ComplexTransform.conj, + ] + + CreateTrmmOperator(manifest, layouts, side_modes, fill_modes, diag_types, tile_descriptions, \ + data_type, alignment_constraints, complex_transforms) +# + + +# +def GenerateSM80_TensorOp_884_trmm_complex_gaussian(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 0): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + ] + + side_modes = [ + SideMode.Left, SideMode.Right, + ] + + fill_modes = [ + FillMode.Lower, FillMode.Upper, + ] + + diag_types = [ + DiagType.NonUnit, DiagType.Unit, + ] + + math_inst = \ + MathInstruction( \ + [8, 8, 4], \ + DataType.f64, DataType.f64, DataType.f64, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_complex_gaussian) + + min_cc = 80 + max_cc = 1024 + + alignment_constraints = [1,] + + tile_descriptions = [ + TileDescription([64, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc), + ] + + data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64] + + complex_transforms = [ + ComplexTransform.none, ComplexTransform.conj, + ] + + CreateTrmmOperator(manifest, layouts, side_modes, 
fill_modes, diag_types, tile_descriptions, \ + data_type, alignment_constraints, complex_transforms) +# + +# +def GenerateSM80_TensorOp_884_symm(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 0): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor), + ] + + side_modes = [ + SideMode.Left, SideMode.Right, + ] + + fill_modes = [ + FillMode.Lower, FillMode.Upper, + ] + + math_inst = \ + MathInstruction( \ + [8, 8, 4], \ + DataType.f64, DataType.f64, DataType.f64, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add) + + min_cc = 80 + max_cc = 1024 + + alignment_constraints = [1,] + + tile_descriptions = [ + TileDescription([128, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 32, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([16, 32, 16], 5, [1, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 16, 16], 5, [2, 1, 1], math_inst, min_cc, max_cc), + ] + + data_type = [DataType.f64, DataType.f64, DataType.f64, DataType.f64] + + CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \ + data_type, alignment_constraints, BlasMode.symmetric) +# + +# +def GenerateSM80_TensorOp_884_symm_complex(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 0): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor), + ] + + side_modes = [ + SideMode.Left, SideMode.Right, + ] + + fill_modes = [ + FillMode.Lower, FillMode.Upper, + ] + + math_inst = \ + MathInstruction( \ + [8, 8, 4], \ + DataType.f64, DataType.f64, DataType.f64, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_complex) + + min_cc = 80 + max_cc = 1024 + + alignment_constraints = [1,] + + tile_descriptions = [ + TileDescription([128, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 128, 8], 3, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([64, 64, 8], 3, [2, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([32, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([16, 32, 8], 4, [1, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([32, 16, 8], 4, [2, 1, 1], math_inst, min_cc, max_cc), + ] + + data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64] + + # SYMM computation + CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \ + data_type, alignment_constraints, BlasMode.symmetric) + + # HEMM computation + CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \ + data_type, alignment_constraints, BlasMode.hermitian) +# + +# +def GenerateSM80_TensorOp_884_symm_complex_gaussian(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 0): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor), + ] + + side_modes = [ + SideMode.Left, SideMode.Right, + ] + + fill_modes = [ + FillMode.Lower, FillMode.Upper, + ] + + math_inst = \ + MathInstruction( \ + [8, 8, 4], \ + 
DataType.f64, DataType.f64, DataType.f64, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_complex_gaussian) + + min_cc = 80 + max_cc = 1024 + + alignment_constraints = [1,] + + tile_descriptions = [ + TileDescription([64, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([32, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([16, 32, 8], 4, [1, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([32, 16, 8], 4, [2, 1, 1], math_inst, min_cc, max_cc), + ] + + data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64] + + complex_transforms = [ComplexTransform.none,] + + # SYMM computation + CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \ + data_type, alignment_constraints, BlasMode.symmetric) + + # HEMM computation + CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \ + data_type, alignment_constraints, BlasMode.hermitian) +# + +################################################################################################### + +# +def GenerateSM80_Simt_f32(manifest, cuda_version): + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + ] + + math_instructions = [ + MathInstruction( \ + [1, 1, 1], \ + DataType.f32, DataType.f32, DataType.f32, \ + OpcodeClass.Simt, \ + MathOperation.multiply_add), + ] + + min_cc = 80 + max_cc = 1024 + + alignment_constraints = [1,] + + for math_inst in math_instructions: + tile_descriptions = [ + TileDescription([256, 128, 8], 5, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 256, 8], 5, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 8], 5, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([256, 128, 8], 4, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 256, 8], 4, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 8], 4, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 8], 5, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 128, 8], 5, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 64, 8], 5, [2, 1, 1], math_inst, min_cc, max_cc), + TileDescription([128, 32, 8], 5, [2, 1, 1], math_inst, min_cc, max_cc), + TileDescription([ 32, 128, 8], 5, [1, 2, 1], math_inst, min_cc, max_cc), + ] + + data_type = [ + math_inst.element_a, + math_inst.element_b, + math_inst.element_accumulator, + math_inst.element_accumulator, + ] + + CreateGemmOperator(manifest, layouts, tile_descriptions, \ + data_type, alignment_constraints) + + conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC) + CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints) +# + + +# +def GenerateSM80_Simt_f64(manifest, cuda_version): + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + ] + + math_instructions = [ + MathInstruction( \ + [1, 1, 1], \ 
+ DataType.f64, DataType.f64, DataType.f64, \ + OpcodeClass.Simt, \ + MathOperation.multiply_add), + ] + + min_cc = 80 + max_cc = 1024 + + alignment_constraints = [1,] + + for math_inst in math_instructions: + tile_descriptions = [ + TileDescription([128, 128, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 128, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 64, 8], 5, [2, 1, 1], math_inst, min_cc, max_cc), + TileDescription([128, 32, 8], 5, [2, 1, 1], math_inst, min_cc, max_cc), + TileDescription([ 32, 128, 8], 5, [1, 2, 1], math_inst, min_cc, max_cc), + ] + + data_type = [ + math_inst.element_a, + math_inst.element_b, + math_inst.element_accumulator, + math_inst.element_accumulator, + ] + + CreateGemmOperator(manifest, layouts, tile_descriptions, \ + data_type, alignment_constraints) +# + + +################################################################################################## +# +def GenerateSM80_Simt_complex(manifest, cuda_version): + math_instructions = [ + MathInstruction( \ + [1, 1, 1], \ + DataType.f32, DataType.f32, DataType.f32, \ + OpcodeClass.Simt, \ + MathOperation.multiply_add_complex), + ] + + min_cc = 80 + max_cc = 1024 + + alignment_constraints = [1,] + + data_type = [ + DataType.cf32, + DataType.cf32, + DataType.cf32, + DataType.cf32 + ] + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + ] + + complex_transforms = [ + (ComplexTransform.none, ComplexTransform.none), + (ComplexTransform.conj, ComplexTransform.none), + (ComplexTransform.none, ComplexTransform.conj), + (ComplexTransform.conj, ComplexTransform.conj) + ] + + for math_inst in math_instructions: + + tile_descriptions = [ + TileDescription([128, 128, 8], 5, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 128, 8], 4, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([ 64, 128, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 32, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc), + ] + + CreateGemmOperator(manifest, layouts, tile_descriptions, data_type, alignment_constraints, complex_transforms) + + conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC) + CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints) +# + +################################################################################################### + +# +def GenerateSM80(manifest, cuda_version): + GenerateSM80_TensorOp_16816(manifest, cuda_version) + GenerateSM80_SparseTensorOp_16832(manifest, cuda_version) + GenerateSM80_PlanarComplexTensorOp_16816(manifest, cuda_version) + GenerateSM80_TensorOp_1688(manifest, cuda_version) + GenerateSM80_TensorOp_1688_fast_math(manifest, cuda_version) + GenerateSM80_SparseTensorOp_16816_fast_math(manifest, cuda_version) + GenerateSM80_TensorOp_1688_complex(manifest, cuda_version) + # 3xTF32 + 
GenerateSM80_TensorOp_1688_fast_fp32_math(manifest, cuda_version) + GenerateSM80_TensorOp_1688_fast_fp32_math_complex(manifest, cuda_version) + GenerateSM80_TensorOp_1688_rank_k(manifest, cuda_version) + GenerateSM80_TensorOp_1688_rank_k_complex(manifest, cuda_version) + GenerateSM80_TensorOp_1688_trmm(manifest, cuda_version) + GenerateSM80_TensorOp_1688_trmm_complex(manifest, cuda_version) + GenerateSM80_TensorOp_1688_symm(manifest, cuda_version) + GenerateSM80_TensorOp_1688_symm_complex(manifest, cuda_version) + GenerateSM80_TensorOp_884(manifest, cuda_version) + GenerateSM80_TensorOp_884_complex(manifest, cuda_version) + GenerateSM80_TensorOp_884_complex_gaussian(manifest, cuda_version) + GenerateSM80_TensorOp_884_rank_k(manifest, cuda_version) + GenerateSM80_TensorOp_884_rank_k_complex(manifest, cuda_version) + GenerateSM80_TensorOp_884_rank_k_complex_gaussian(manifest, cuda_version) + GenerateSM80_TensorOp_884_trmm(manifest, cuda_version) + GenerateSM80_TensorOp_884_trmm_complex(manifest, cuda_version) + GenerateSM80_TensorOp_884_trmm_complex_gaussian(manifest, cuda_version) + GenerateSM80_TensorOp_884_symm(manifest, cuda_version) + GenerateSM80_TensorOp_884_symm_complex(manifest, cuda_version) + GenerateSM80_TensorOp_884_symm_complex_gaussian(manifest, cuda_version) + GenerateSM80_TensorOp_16832_TN(manifest, cuda_version) + GenerateSM80_SparseTensorOp_16864_TN(manifest, cuda_version) + GenerateSM80_TensorOp_16832_Interleaved(manifest, cuda_version) + GenerateSM80_TensorOp_16864_TN(manifest, cuda_version) + GenerateSM80_SparseTensorOp_168128_TN(manifest, cuda_version) + GenerateSM80_TensorOp_16864_Interleaved(manifest, cuda_version) + GenerateSM80_TensorOp_168256(manifest, cuda_version) + GenerateSM80_Simt_f32(manifest, cuda_version) + GenerateSM80_Simt_f64(manifest, cuda_version) + GenerateSM80_Simt_complex(manifest, cuda_version) + +################################################################################################### + +# +def GenerateSM90_TensorOp_16b_WGMMA_gemm(manifest, cuda_version): + if not CudaToolkitVersionSatisfies(cuda_version, 12, 0): + return + + # layouts for ABC and their alignments. 
+ layouts = [ + [[LayoutType.ColumnMajor, 8], [LayoutType.ColumnMajor, 8], [LayoutType.ColumnMajor, 1]], + [[LayoutType.ColumnMajor, 8], [LayoutType.RowMajor, 8], [LayoutType.ColumnMajor, 1]], + [[LayoutType.RowMajor, 8], [LayoutType.ColumnMajor, 8], [LayoutType.ColumnMajor, 1]], + [[LayoutType.RowMajor, 8], [LayoutType.RowMajor, 8], [LayoutType.ColumnMajor, 1]], + ] + + math_instructions = [ + MathInstruction( + [64, 128, 16], + DataType.f16, DataType.f16, DataType.f16, + OpcodeClass.TensorOp, + MathOperation.multiply_add), + MathInstruction( + [64, 128, 16], + DataType.f16, DataType.f16, DataType.f32, + OpcodeClass.TensorOp, + MathOperation.multiply_add), + MathInstruction( + [64, 128, 16], + DataType.bf16, DataType.bf16, DataType.f32, + OpcodeClass.TensorOp, + MathOperation.multiply_add), + ] + + min_cc = 90 + max_cc = 90 + + for math_inst in math_instructions: + tile_descriptions_small = [ + # Not compatible with TmaWarpSpecializedCooperative + TileDescription([math_inst.instruction_shape[0], math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4], + 0, [4, 1, 1], math_inst, min_cc, max_cc, [2,1,1]), + TileDescription([math_inst.instruction_shape[0], math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4], + 0, [4, 1, 1], math_inst, min_cc, max_cc, [1,2,1]), + ] + tile_descriptions_medium = [ + TileDescription([math_inst.instruction_shape[0]*2, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4], + 0, [4, 1, 1], math_inst, min_cc, max_cc, [2,1,1]), + TileDescription([math_inst.instruction_shape[0]*2, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4], + 0, [4, 1, 1], math_inst, min_cc, max_cc, [1,2,1]), + ] + tile_descriptions_large = [ + TileDescription([math_inst.instruction_shape[0]*4, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4], + 0, [4, 1, 1], math_inst, min_cc, max_cc, [2,1,1]), + TileDescription([math_inst.instruction_shape[0]*4, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4], + 0, [4, 1, 1], math_inst, min_cc, max_cc, [1,2,1]), + TileDescription([math_inst.instruction_shape[0]*2, math_inst.instruction_shape[1]*2, math_inst.instruction_shape[2]*4], + 0, [4, 2, 1], math_inst, min_cc, max_cc, [2,1,1]), + TileDescription([math_inst.instruction_shape[0]*2, math_inst.instruction_shape[1]*2, math_inst.instruction_shape[2]*4], + 0, [4, 2, 1], math_inst, min_cc, max_cc, [1,2,1]), + ] + tile_descriptions = tile_descriptions_medium + tile_descriptions_large + + data_type = { + "a_type" : math_inst.element_a, + "b_type" : math_inst.element_b, + "c_type" : math_inst.element_accumulator, + "d_type" : math_inst.element_accumulator, + "acc_type" : math_inst.element_accumulator, + "epi_type" : math_inst.element_accumulator + } + + # Set alignment c based on Destination format. 
+ for layout in layouts: + if data_type["c_type"] in [DataType.s32, DataType.f32]: + layout[2][1] = 4 + elif data_type["c_type"] in [DataType.f16, DataType.bf16]: + layout[2][1] = 8 + + if CudaToolkitVersionSatisfies(cuda_version, 12, 1): + schedules = [ + [KernelScheduleType.ScheduleAuto, EpilogueScheduleType.ScheduleAuto], + [KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.NoSmemWarpSpecialized], + [KernelScheduleType.TmaWarpSpecializedPingpong, EpilogueScheduleType.NoSmemWarpSpecialized], + [KernelScheduleType.TmaWarpSpecialized, EpilogueScheduleType.NoSmemWarpSpecialized] + ] + stream_k_schedules = [[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.NoSmemWarpSpecialized]] + else: + schedules = [ + [KernelScheduleType.ScheduleAuto, EpilogueScheduleType.ScheduleAuto], + [KernelScheduleType.TmaWarpSpecialized, EpilogueScheduleType.NoSmemWarpSpecialized] + # TmaWarpSpecializedCooperative and TmaWarpSpecializedPingpong require CUDA version >= 12.1 for optimal performance. + ] + stream_k_schedules = [] + + CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type, schedules) + + if CudaToolkitVersionSatisfies(cuda_version, 12, 1): + # Add stream-K variants + CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type, stream_k_schedules, tile_schedulers=[TileSchedulerType.StreamK]) + + # persistent kernels with TMA epilogues + if CudaToolkitVersionSatisfies(cuda_version, 12, 1): + # not enough smem for 256x128 f32 out with C allocation + if data_type["d_type"] == DataType.f32: + CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions_medium, data_type, + [[KernelScheduleType.TmaWarpSpecializedPingpong, EpilogueScheduleType.TmaWarpSpecialized], + [KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative]]) + + CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions_medium, data_type, + [[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative]], + tile_schedulers=[TileSchedulerType.StreamK]) + else: + CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type, + [[KernelScheduleType.TmaWarpSpecializedPingpong, EpilogueScheduleType.TmaWarpSpecialized], + [KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative]]) + + CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type, + [[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative]], + tile_schedulers=[TileSchedulerType.StreamK]) + + # Emit instance without C allocation + load + data_type["c_type"] = DataType.void + CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type, + [[KernelScheduleType.TmaWarpSpecializedPingpong, EpilogueScheduleType.TmaWarpSpecialized], + [KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative]]) + + CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type, + [[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative]], + tile_schedulers=[TileSchedulerType.StreamK]) + + # for mixed precision kernels, also generate kernels that write output matrix in the A/B format + # Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. 
F16 accumulation) + if math_inst.element_a != math_inst.element_accumulator: + data_type_mixed = { + "a_type" : math_inst.element_a, + "b_type" : math_inst.element_b, + "c_type" : math_inst.element_a, + "d_type" : math_inst.element_a, + "acc_type" : math_inst.element_accumulator, + "epi_type" : math_inst.element_accumulator + } + + # Set alignment c based on Destination format. + for layout in layouts: + if data_type_mixed["c_type"] in [DataType.s32, DataType.f32]: + layout[2][1] = 4 + elif data_type_mixed["c_type"] in [DataType.f16, DataType.bf16]: + layout[2][1] = 8 + + CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type_mixed, schedules) + CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type_mixed, stream_k_schedules, tile_schedulers=[TileSchedulerType.StreamK]) + + # persistent kernels with TMA epilogues + if CudaToolkitVersionSatisfies(cuda_version, 12, 1): + CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type_mixed, + [[KernelScheduleType.TmaWarpSpecializedPingpong, EpilogueScheduleType.TmaWarpSpecialized], + [KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative]]) + + CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type_mixed, + [[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative]], + tile_schedulers=[TileSchedulerType.StreamK]) + + # Emit instance without C allocation+load + data_type_mixed["c_type"] = DataType.void + CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type_mixed, + [[KernelScheduleType.TmaWarpSpecializedPingpong, EpilogueScheduleType.TmaWarpSpecialized], + [KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative]]) + + CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type_mixed, + [[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative]], + tile_schedulers=[TileSchedulerType.StreamK]) + +# +def GenerateSM90_TensorOp_tf32_WGMMA_gemm(manifest, cuda_version): + if not CudaToolkitVersionSatisfies(cuda_version, 12, 0): + return + + # layouts for ABC and their alignments + layouts_tf32 = [ + [[LayoutType.RowMajor, 4], [LayoutType.ColumnMajor, 4], [LayoutType.ColumnMajor, 4]], + [[LayoutType.RowMajor, 4], [LayoutType.RowMajor, 4], [LayoutType.ColumnMajor, 4]], + [[LayoutType.ColumnMajor, 4], [LayoutType.ColumnMajor, 4], [LayoutType.ColumnMajor, 4]], + [[LayoutType.ColumnMajor, 4], [LayoutType.RowMajor, 4], [LayoutType.ColumnMajor, 4]], + ] + + math_inst = MathInstruction( + [64, 128, 8], + DataType.tf32, DataType.tf32, DataType.f32, + OpcodeClass.TensorOp, + MathOperation.multiply_add) + + math_inst_largeN = MathInstruction( + [64, 256, 8], + DataType.tf32, DataType.tf32, DataType.f32, + OpcodeClass.TensorOp, + MathOperation.multiply_add) + + min_cc = 90 + max_cc = 90 + + tile_descriptions_large = [ + TileDescription([math_inst.instruction_shape[0]*4, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4], + 0, [4, 1, 1], math_inst, min_cc, max_cc, [1,2,1]), + TileDescription([math_inst.instruction_shape[0]*4, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4], + 0, [4, 1, 1], math_inst, min_cc, max_cc, [2,1,1]), + TileDescription([math_inst_largeN.instruction_shape[0]*2, math_inst_largeN.instruction_shape[1], math_inst_largeN.instruction_shape[2]*4], + 0, [4, 1, 1], math_inst_largeN, min_cc, max_cc, [2,1,1]), + 
TileDescription([math_inst_largeN.instruction_shape[0]*2, math_inst_largeN.instruction_shape[1], math_inst_largeN.instruction_shape[2]*4], + 0, [4, 1, 1], math_inst_largeN, min_cc, max_cc, [1,2,1]), + ] + + tile_descriptions_medium = [ + TileDescription([math_inst.instruction_shape[0]*2, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4], + 0, [4, 1, 1], math_inst, min_cc, max_cc, [2,1,1]), + TileDescription([math_inst.instruction_shape[0]*2, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4], + 0, [4, 1, 1], math_inst, min_cc, max_cc, [1,2,1]), + ] + + tile_descriptions_small = [ + TileDescription([math_inst.instruction_shape[0], math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4], + 0, [4, 1, 1], math_inst, min_cc, max_cc, [2,1,1]), + TileDescription([math_inst.instruction_shape[0], math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4], + 0, [4, 1, 1], math_inst, min_cc, max_cc, [1,2,1]), + ] + tile_descriptions = tile_descriptions_medium + tile_descriptions_small + + data_types = [ + { + "a_type" : math_inst.element_a, + "b_type" : math_inst.element_b, + "c_type" : math_inst.element_accumulator, + "d_type" : math_inst.element_accumulator, + "acc_type" : math_inst.element_accumulator, + "epi_type" : math_inst.element_accumulator + }, + { + "a_type" : DataType.f32, + "b_type" : DataType.f32, + "c_type" : math_inst.element_accumulator, + "d_type" : math_inst.element_accumulator, + "acc_type" : math_inst.element_accumulator, + "epi_type" : DataType.f32 + } + ] + + schedules_default = [ + [KernelScheduleType.ScheduleAuto, EpilogueScheduleType.ScheduleAuto], + [KernelScheduleType.TmaWarpSpecialized, EpilogueScheduleType.NoSmemWarpSpecialized], + ] + + # TMA kernels with TT layout use EpilogueTransposed (NoSmemWarpSpecialized with swapped strides), + # because they use NN kernels underneath and transposing its epilogue will get the correct output + schedules_transposed_epilogue = [ + [KernelScheduleType.ScheduleAuto, EpilogueScheduleType.EpilogueTransposed], + [KernelScheduleType.TmaWarpSpecialized, EpilogueScheduleType.EpilogueTransposed] + ] + + # TMA kernels with TN, NN, or NT layout + layouts_tf32_tn_nn_nt = [layouts_tf32[0], layouts_tf32[2], layouts_tf32[3]] + # TMA kernels with TT layout + layouts_tf32_tt = [layouts_tf32[1]] + + if CudaToolkitVersionSatisfies(cuda_version, 12, 1): + CreateGemmUniversal3xOperator(manifest, layouts_tf32_tn_nn_nt, tile_descriptions_small, data_types, [ + [KernelScheduleType.TmaWarpSpecializedPingpong, EpilogueScheduleType.TmaWarpSpecialized], + [KernelScheduleType.TmaWarpSpecializedPingpong, EpilogueScheduleType.NoSmemWarpSpecialized] + ]) + + CreateGemmUniversal3xOperator(manifest, layouts_tf32_tn_nn_nt, tile_descriptions_medium, data_types, [ + [KernelScheduleType.TmaWarpSpecializedPingpong, EpilogueScheduleType.TmaWarpSpecialized], + [KernelScheduleType.TmaWarpSpecializedPingpong, EpilogueScheduleType.NoSmemWarpSpecialized], + [KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative], + [KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.NoSmemWarpSpecialized] + ]) + + CreateGemmUniversal3xOperator(manifest, layouts_tf32_tn_nn_nt, tile_descriptions_large, data_types, [ + [KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.NoSmemWarpSpecialized], + ]) + + CreateGemmUniversal3xOperator(manifest, layouts_tf32_tt, tile_descriptions_small, data_types, [ + [KernelScheduleType.TmaWarpSpecializedPingpong, 
EpilogueScheduleType.EpilogueTransposed] + ]) + + CreateGemmUniversal3xOperator(manifest, layouts_tf32_tt, tile_descriptions_medium, data_types, [ + [KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.EpilogueTransposed], + [KernelScheduleType.TmaWarpSpecializedPingpong, EpilogueScheduleType.EpilogueTransposed] + ]) + + CreateGemmUniversal3xOperator(manifest, layouts_tf32_tt, tile_descriptions_large, data_types, [ + [KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.EpilogueTransposed], + ]) + + else: + CreateGemmUniversal3xOperator(manifest, layouts_tf32_tn_nn_nt, tile_descriptions, data_types, schedules_default) + CreateGemmUniversal3xOperator(manifest, layouts_tf32_tt, tile_descriptions, data_types, schedules_transposed_epilogue) + +# +def GenerateSM90_TensorOp_int8_WGMMA_gemm(manifest, cuda_version): + if not CudaToolkitVersionSatisfies(cuda_version, 12, 0): + return + + # layouts for ABC and their alignments + layouts = [ + [[LayoutType.RowMajor, 16], [LayoutType.ColumnMajor, 16], [LayoutType.ColumnMajor, 16]], + ] + + math_instructions = [ + MathInstruction( + [64, 128, 32], + DataType.s8, DataType.s8, DataType.s32, + OpcodeClass.TensorOp, + MathOperation.multiply_add), + MathInstruction( + [64, 128, 32], + DataType.u8, DataType.u8, DataType.s32, + OpcodeClass.TensorOp, + MathOperation.multiply_add), + ] + + min_cc = 90 + max_cc = 90 + + for math_inst in math_instructions: + # 64x128x128 + tile_descriptions_small = [ + TileDescription([math_inst.instruction_shape[0], math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4], + 0, [4, 1, 1], math_inst, min_cc, max_cc, [2,1,1]), + TileDescription([math_inst.instruction_shape[0], math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4], + 0, [4, 1, 1], math_inst, min_cc, max_cc, [1,2,1]), + ] + # 128x128x128 + tile_descriptions_medium = [ + TileDescription([math_inst.instruction_shape[0]*2, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4], + 0, [4, 1, 1], math_inst, min_cc, max_cc, [2,1,1]), + TileDescription([math_inst.instruction_shape[0]*2, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4], + 0, [4, 1, 1], math_inst, min_cc, max_cc, [1,2,1]), + ] + tile_descriptions = tile_descriptions_medium + tile_descriptions_small + + data_types = [ + { + "a_type" : math_inst.element_a, + "b_type" : math_inst.element_b, + "c_type" : math_inst.element_accumulator, + "d_type" : math_inst.element_accumulator, + "acc_type" : math_inst.element_accumulator, + "epi_type" : math_inst.element_accumulator + }, + { + "a_type" : math_inst.element_a, + "b_type" : math_inst.element_b, + "c_type" : DataType.s8, + "d_type" : math_inst.element_a, + "acc_type" : math_inst.element_accumulator, + "epi_type" : DataType.f32 + } + ] + + for data_type in data_types: + for layout in layouts: + layout[2][1] = 128 // DataTypeSize[data_type["d_type"]] + CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type) + + # persistent kernels with TMA epilogues + if CudaToolkitVersionSatisfies(cuda_version, 12, 1): + # Emit instance without C allocation+load + data_types += [ + { + "a_type" : math_inst.element_a, + "b_type" : math_inst.element_b, + "c_type" : DataType.void, + "d_type" : math_inst.element_accumulator, + "acc_type" : math_inst.element_accumulator, + "epi_type" : math_inst.element_accumulator + } + ] + for data_type in data_types: + # Set output alignment based on destination format first + for layout in layouts: + layout[2][1] = 128 // 
DataTypeSize[data_type["d_type"]] + # Pingpong persistent + CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type, + [[KernelScheduleType.TmaWarpSpecializedPingpong, EpilogueScheduleType.TmaWarpSpecialized], + [KernelScheduleType.TmaWarpSpecializedPingpong, EpilogueScheduleType.NoSmemWarpSpecialized]]) + # Cooperative persistent + CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions_medium, data_type, + [[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative], + [KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.NoSmemWarpSpecialized]], + tile_schedulers=[TileSchedulerType.Persistent, TileSchedulerType.StreamK] + ) + +def GenerateSM90_TensorOp_fp8_WGMMA_gemm(manifest, cuda_version): + if not CudaToolkitVersionSatisfies(cuda_version, 12, 0): + return + + # layouts for ABC and their alignments + layouts = [ + [[LayoutType.RowMajor, 16], [LayoutType.ColumnMajor, 16], [LayoutType.ColumnMajor, 1]], # TN Layout + ] + + math_instructions = [ + # inst 64x128x32 + MathInstruction( + [64, 128, 32], + DataType.e4m3, DataType.e4m3, DataType.f32, + OpcodeClass.TensorOp, + MathOperation.multiply_add), + MathInstruction( + [64, 128, 32], + DataType.e4m3, DataType.e5m2, DataType.f32, + OpcodeClass.TensorOp, + MathOperation.multiply_add), + MathInstruction( + [64, 128, 32], + DataType.e5m2, DataType.e4m3, DataType.f32, + OpcodeClass.TensorOp, + MathOperation.multiply_add), + MathInstruction( + [64, 128, 32], + DataType.e5m2, DataType.e5m2, DataType.f32, + OpcodeClass.TensorOp, + MathOperation.multiply_add), + ] + + min_cc = 90 + max_cc = 90 + + for math_inst in math_instructions: + data_types = [ + { + "a_type" : math_inst.element_a, + "b_type" : math_inst.element_b, + "c_type" : DataType.f32, + "d_type" : DataType.f32, + "acc_type" : math_inst.element_accumulator, + "epi_type" : math_inst.element_accumulator + }, + { + "a_type" : math_inst.element_a, + "b_type" : math_inst.element_b, + "c_type" : DataType.f32, + "d_type" : DataType.e4m3, + "acc_type" : math_inst.element_accumulator, + "epi_type" : math_inst.element_accumulator + }, + { + "a_type" : math_inst.element_a, + "b_type" : math_inst.element_b, + "c_type" : DataType.f32, + "d_type" : DataType.e5m2, + "acc_type" : math_inst.element_accumulator, + "epi_type" : math_inst.element_accumulator + }, + { + "a_type" : math_inst.element_a, + "b_type" : math_inst.element_b, + "c_type" : DataType.bf16, + "d_type" : DataType.bf16, + "acc_type" : math_inst.element_accumulator, + "epi_type" : math_inst.element_accumulator + }, + { + "a_type" : math_inst.element_a, + "b_type" : math_inst.element_b, + "c_type" : DataType.bf16, + "d_type" : DataType.e4m3, + "acc_type" : math_inst.element_accumulator, + "epi_type" : math_inst.element_accumulator + }, + { + "a_type" : math_inst.element_a, + "b_type" : math_inst.element_b, + "c_type" : DataType.bf16, + "d_type" : DataType.e5m2, + "acc_type" : math_inst.element_accumulator, + "epi_type" : math_inst.element_accumulator + }, + { + "a_type" : math_inst.element_a, + "b_type" : math_inst.element_b, + "c_type" : DataType.f16, + "d_type" : DataType.f16, + "acc_type" : math_inst.element_accumulator, + "epi_type" : math_inst.element_accumulator + }, + { + "a_type" : math_inst.element_a, + "b_type" : math_inst.element_b, + "c_type" : DataType.f16, + "d_type" : DataType.e4m3, + "acc_type" : math_inst.element_accumulator, + "epi_type" : math_inst.element_accumulator + }, + { + "a_type" : math_inst.element_a, + "b_type" : 
math_inst.element_b, + "c_type" : DataType.f16, + "d_type" : DataType.e5m2, + "acc_type" : math_inst.element_accumulator, + "epi_type" : math_inst.element_accumulator + }, + ] + + data_types_large_tile = [ + { + "a_type" : math_inst.element_a, + "b_type" : math_inst.element_b, + "c_type" : DataType.void, + "d_type" : DataType.e5m2, + "acc_type" : math_inst.element_accumulator, + "epi_type" : math_inst.element_accumulator + }, + { + "a_type" : math_inst.element_a, + "b_type" : math_inst.element_b, + "c_type" : DataType.void, + "d_type" : DataType.e4m3, + "acc_type" : math_inst.element_accumulator, + "epi_type" : math_inst.element_accumulator + } + ] + + if math_inst.instruction_shape[1] == 128: + tile_descriptions_small = [ + # 64x128x128 + TileDescription([math_inst.instruction_shape[0], math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4], + 0, [4, 1, 1], math_inst, min_cc, max_cc, [1,2,1]), + TileDescription([math_inst.instruction_shape[0], math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4], + 0, [4, 1, 1], math_inst, min_cc, max_cc, [2,1,1]), + ] + tile_descriptions_large = [ + # 256x128x128 + TileDescription([math_inst.instruction_shape[0]*4, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4], + 0, [4, 1, 1], math_inst, min_cc, max_cc, [1,2,1]), + TileDescription([math_inst.instruction_shape[0]*4, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4], + 0, [4, 1, 1], math_inst, min_cc, max_cc, [2,1,1]), + ] + tile_descriptions = [ + # 128x128x128 + TileDescription([math_inst.instruction_shape[0]*2, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4], + 0, [4, 1, 1], math_inst, min_cc, max_cc, [1,2,1]), + TileDescription([math_inst.instruction_shape[0]*2, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4], + 0, [4, 1, 1], math_inst, min_cc, max_cc, [2,1,1]), + ] + + else: + assert False, "math inst is not supported" + + # some schedules disabled to save on library size + if CudaToolkitVersionSatisfies(cuda_version, 12, 1): + schedules = [ + #[KernelScheduleType.ScheduleAuto, EpilogueScheduleType.ScheduleAuto], + [KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.NoSmemWarpSpecialized], + [KernelScheduleType.TmaWarpSpecialized, EpilogueScheduleType.NoSmemWarpSpecialized], + [KernelScheduleType.TmaWarpSpecializedPingpongFP8FastAccum, EpilogueScheduleType.NoSmemWarpSpecialized], + [KernelScheduleType.TmaWarpSpecializedCooperativeFP8FastAccum, EpilogueScheduleType.NoSmemWarpSpecialized], + [KernelScheduleType.TmaWarpSpecializedFP8FastAccum, EpilogueScheduleType.NoSmemWarpSpecialized] + ] + stream_k_schedules = [[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.NoSmemWarpSpecialized], + [KernelScheduleType.TmaWarpSpecializedCooperativeFP8FastAccum, EpilogueScheduleType.NoSmemWarpSpecialized]] + else: + schedules = [ + # [KernelScheduleType.ScheduleAuto, EpilogueScheduleType.ScheduleAuto], + [KernelScheduleType.TmaWarpSpecialized, EpilogueScheduleType.NoSmemWarpSpecialized] + # TmaWarpSpecializedCooperative require CUDA version >= 12.1 for optimal performance. 
+ ] + stream_k_schedules = [] + + for data_type in data_types: + # With No-SMEM epilogues + CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type, schedules) + + if CudaToolkitVersionSatisfies(cuda_version, 12, 1): + # Persistent kernels with TMA epilogues + CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type, + [[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative], + [KernelScheduleType.TmaWarpSpecializedPingpongFP8FastAccum, EpilogueScheduleType.TmaWarpSpecialized], + [KernelScheduleType.TmaWarpSpecializedCooperativeFP8FastAccum, EpilogueScheduleType.TmaWarpSpecializedCooperative]]) + + # Small tiles + CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions_small, data_type, + [[KernelScheduleType.TmaWarpSpecializedPingpongFP8FastAccum, EpilogueScheduleType.TmaWarpSpecialized], + [KernelScheduleType.TmaWarpSpecializedPingpongFP8FastAccum, EpilogueScheduleType.NoSmemWarpSpecialized]]) + + # Large tiles + CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions_large, data_types_large_tile, + [[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative], + [KernelScheduleType.TmaWarpSpecializedCooperativeFP8FastAccum, EpilogueScheduleType.TmaWarpSpecializedCooperative]]) + + # Add stream-K variants (with and without TMA epilogues) + CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type, stream_k_schedules, tile_schedulers=[TileSchedulerType.StreamK]) + CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type, + [[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative], + [KernelScheduleType.TmaWarpSpecializedCooperativeFP8FastAccum, EpilogueScheduleType.TmaWarpSpecializedCooperative]], + tile_schedulers=[TileSchedulerType.StreamK]) + + +# +def GenerateSM90_TensorOp_1684(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 8): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + ] + + math_inst = MathInstruction( + [16, 8, 4], + DataType.f64, DataType.f64, DataType.f64, + OpcodeClass.TensorOp, + MathOperation.multiply_add) + + min_cc = 90 + max_cc = 90 + + alignment_constraints = [1,] + + tile_descriptions = [ + TileDescription([128, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([256, 64, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 256, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([256, 32, 16], 3, [4, 1, 1], math_inst, min_cc, max_cc), + TileDescription([32, 256, 16], 3, [1, 4, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 32, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([16, 32, 16], 5, [1, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 16, 16], 5, [2, 1, 1], math_inst, min_cc, 
max_cc), + ] + + data_type = [DataType.f64, DataType.f64, DataType.f64, DataType.f64] + + CreateGemmOperator(manifest, layouts, tile_descriptions, + data_type, alignment_constraints) + +# + +# +def GenerateSM90_TensorOp_1684_complex(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 8): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + ] + + math_inst = \ + MathInstruction( \ + [16, 8, 4], \ + DataType.f64, DataType.f64, DataType.f64, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_complex) + + min_cc = 90 + max_cc = 90 + + alignment_constraints = [1,] + + tile_descriptions = [ + TileDescription([128, 64, 8 ], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 128, 8 ], 3, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([64, 64, 8 ], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 32, 8 ], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 64, 8 ], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 32, 8 ], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([16, 32, 8 ], 4, [1, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 16, 8 ], 4, [2, 1, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 128, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([64, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 32, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([16, 32, 16], 4, [1, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 16, 16], 3, [2, 1, 1], math_inst, min_cc, max_cc), + ] + + data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64] + + complex_transforms = [ + (ComplexTransform.none, ComplexTransform.none), + (ComplexTransform.conj, ComplexTransform.none), + (ComplexTransform.none, ComplexTransform.conj), + (ComplexTransform.conj, ComplexTransform.conj) + ] + + CreateGemmOperator(manifest, layouts, tile_descriptions, \ + data_type, alignment_constraints, complex_transforms) +# + +# +def GenerateSM90_TensorOp_1684_complex_gaussian(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 8): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), + ] + + math_inst = \ + MathInstruction( \ + [16, 8, 4], \ + DataType.f64, DataType.f64, DataType.f64, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_complex_gaussian) + + min_cc = 90 + max_cc = 90 + + alignment_constraints = [1,] + + tile_descriptions = [ + TileDescription([64, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([16, 
32, 8], 4, [1, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 16, 8], 4, [2, 1, 1], math_inst, min_cc, max_cc), + ] + + data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64] + + complex_transforms = [ + (ComplexTransform.none, ComplexTransform.none), + (ComplexTransform.conj, ComplexTransform.none), + (ComplexTransform.none, ComplexTransform.conj), + (ComplexTransform.conj, ComplexTransform.conj) + ] + + CreateGemmOperator(manifest, layouts, tile_descriptions, \ + data_type, alignment_constraints, complex_transforms) +# + +# +def GenerateSM90_TensorOp_1684_rank_k(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 8): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor), + ] + + fill_modes = [ + FillMode.Lower, FillMode.Upper, + ] + + math_inst = \ + MathInstruction( \ + [16, 8, 4], \ + DataType.f64, DataType.f64, DataType.f64, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add) + + min_cc = 90 + max_cc = 90 + + alignment_constraints = [1,] + + tile_descriptions = [ + TileDescription([128, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 32, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([16, 32, 16], 5, [1, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 16, 16], 5, [2, 1, 1], math_inst, min_cc, max_cc), + ] + + data_type = [DataType.f64, DataType.f64, DataType.f64] + + CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \ + data_type, alignment_constraints, BlasMode.symmetric) +# + +# +def GenerateSM90_TensorOp_1684_rank_k_complex(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 8): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor), + ] + + fill_modes = [ + FillMode.Lower, FillMode.Upper, + ] + + math_inst = \ + MathInstruction( \ + [16, 8, 4], \ + DataType.f64, DataType.f64, DataType.f64, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_complex) + + min_cc = 90 + max_cc = 90 + + alignment_constraints = [1,] + + tile_descriptions = [ + TileDescription([128, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 128, 8], 3, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([64, 64, 8], 3, [2, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([32, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([16, 32, 8], 4, [1, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([32, 16, 8], 4, [2, 1, 1], math_inst, min_cc, max_cc), + ] + + data_type = [DataType.cf64, DataType.cf64, DataType.cf64] + + # SYRK computation + CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \ + data_type, alignment_constraints, BlasMode.symmetric) + + # HERK computation + CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \ + data_type, alignment_constraints, BlasMode.hermitian) + +# + +# +def 
GenerateSM90_TensorOp_1684_rank_k_complex_gaussian(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 8): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor), + ] + + fill_modes = [ + FillMode.Lower, FillMode.Upper, + ] + + math_inst = \ + MathInstruction( \ + [16, 8, 4], \ + DataType.f64, DataType.f64, DataType.f64, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_complex_gaussian) + + min_cc = 90 + max_cc = 90 + + alignment_constraints = [1,] + + tile_descriptions = [ + TileDescription([64, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([32, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([16, 32, 8], 4, [1, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([32, 16, 8], 4, [2, 1, 1], math_inst, min_cc, max_cc), + ] + + data_type = [DataType.cf64, DataType.cf64, DataType.cf64] + + complex_transforms = [ComplexTransform.none,] + + # SYRK computation + CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \ + data_type, alignment_constraints, BlasMode.symmetric) + + # HERK computation + CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \ + data_type, alignment_constraints, BlasMode.hermitian) +# + +# +def GenerateSM90_TensorOp_1684_trmm(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 8): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + ] + + side_modes = [ + SideMode.Left, SideMode.Right, + ] + + fill_modes = [ + FillMode.Lower, FillMode.Upper, + ] + + diag_types = [ + DiagType.NonUnit, DiagType.Unit, + ] + + math_inst = \ + MathInstruction( \ + [16, 8, 4], \ + DataType.f64, DataType.f64, DataType.f64, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add) + + min_cc = 90 + max_cc = 90 + + alignment_constraints = [1,] + + tile_descriptions = [ + TileDescription([128, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), + ] + + data_type = [DataType.f64, DataType.f64, DataType.f64, DataType.f64] + + CreateTrmmOperator(manifest, layouts, side_modes, fill_modes, diag_types, tile_descriptions, \ + data_type, alignment_constraints) +# + +# +def GenerateSM90_TensorOp_1684_trmm_complex(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 8): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + ] + + side_modes = [ + SideMode.Left, SideMode.Right, + ] + + fill_modes = [ + FillMode.Lower, FillMode.Upper, + ] + + diag_types = [ + DiagType.NonUnit, DiagType.Unit, + ] + + math_inst = \ + MathInstruction( \ + [16, 8, 4], \ + DataType.f64, DataType.f64, DataType.f64, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_complex) + + min_cc = 90 + max_cc = 90 + + alignment_constraints = [1,] + + tile_descriptions = [ + TileDescription([128, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 128, 8], 3, [2, 4, 1], 
math_inst, min_cc, max_cc), + TileDescription([64, 64, 8], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc), + ] + + data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64] + + complex_transforms = [ + ComplexTransform.none, ComplexTransform.conj, + ] + + CreateTrmmOperator(manifest, layouts, side_modes, fill_modes, diag_types, tile_descriptions, \ + data_type, alignment_constraints, complex_transforms) +# + + +# +def GenerateSM90_TensorOp_1684_trmm_complex_gaussian(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 8): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), + ] + + side_modes = [ + SideMode.Left, SideMode.Right, + ] + + fill_modes = [ + FillMode.Lower, FillMode.Upper, + ] + + diag_types = [ + DiagType.NonUnit, DiagType.Unit, + ] + + math_inst = \ + MathInstruction( \ + [16, 8, 4], \ + DataType.f64, DataType.f64, DataType.f64, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_complex_gaussian) + + min_cc = 90 + max_cc = 90 + + alignment_constraints = [1,] + + tile_descriptions = [ + TileDescription([64, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc), + ] + + data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64] + + complex_transforms = [ + ComplexTransform.none, ComplexTransform.conj, + ] + + CreateTrmmOperator(manifest, layouts, side_modes, fill_modes, diag_types, tile_descriptions, \ + data_type, alignment_constraints, complex_transforms) +# + +# +def GenerateSM90_TensorOp_1684_symm(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 8): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor), + ] + + side_modes = [ + SideMode.Left, SideMode.Right, + ] + + fill_modes = [ + FillMode.Lower, FillMode.Upper, + ] + + math_inst = \ + MathInstruction( \ + [16, 8, 4], \ + DataType.f64, DataType.f64, DataType.f64, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add) + + min_cc = 90 + max_cc = 90 + + alignment_constraints = [1,] + + tile_descriptions = [ + TileDescription([128, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([128, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 32, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([16, 32, 16], 5, [1, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 16, 16], 5, [2, 1, 1], math_inst, min_cc, max_cc), + ] + + data_type = [DataType.f64, DataType.f64, DataType.f64, DataType.f64] + + CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \ + data_type, alignment_constraints, BlasMode.symmetric) +# + +# +def GenerateSM90_TensorOp_1684_symm_complex(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 8): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor), + ] + + side_modes 
= [ + SideMode.Left, SideMode.Right, + ] + + fill_modes = [ + FillMode.Lower, FillMode.Upper, + ] + + math_inst = \ + MathInstruction( \ + [16, 8, 4], \ + DataType.f64, DataType.f64, DataType.f64, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_complex) + + min_cc = 90 + max_cc = 90 + + alignment_constraints = [1,] + + tile_descriptions = [ + TileDescription([128, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 128, 8], 3, [2, 4, 1], math_inst, min_cc, max_cc), + TileDescription([64, 64, 8], 3, [2, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([32, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([16, 32, 8], 4, [1, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([32, 16, 8], 4, [2, 1, 1], math_inst, min_cc, max_cc), + ] + + data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64] + + # SYMM computation + CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \ + data_type, alignment_constraints, BlasMode.symmetric) + + # HEMM computation + CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \ + data_type, alignment_constraints, BlasMode.hermitian) +# + +# +def GenerateSM90_TensorOp_1684_symm_complex_gaussian(manifest, cuda_version): + + if not CudaToolkitVersionSatisfies(cuda_version, 11, 8): + return + + layouts = [ + (LayoutType.ColumnMajor, LayoutType.ColumnMajor), + ] + + side_modes = [ + SideMode.Left, SideMode.Right, + ] + + fill_modes = [ + FillMode.Lower, FillMode.Upper, + ] + + math_inst = \ + MathInstruction( \ + [16, 8, 4], \ + DataType.f64, DataType.f64, DataType.f64, \ + OpcodeClass.TensorOp, \ + MathOperation.multiply_add_complex_gaussian) + + min_cc = 90 + max_cc = 90 + + alignment_constraints = [1,] + + tile_descriptions = [ + TileDescription([64, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc), + TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc), + TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([32, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([16, 32, 8], 4, [1, 2, 1], math_inst, min_cc, max_cc), + #TileDescription([32, 16, 8], 4, [2, 1, 1], math_inst, min_cc, max_cc), + ] + + data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64] + + complex_transforms = [ComplexTransform.none,] + + # SYMM computation + CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \ + data_type, alignment_constraints, BlasMode.symmetric) + + # HEMM computation + CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \ + data_type, alignment_constraints, BlasMode.hermitian) +# + +################################################################################################### + +# +def GenerateSM90(manifest, cuda_version): + GenerateSM90_TensorOp_16b_WGMMA_gemm(manifest, cuda_version) + GenerateSM90_TensorOp_tf32_WGMMA_gemm(manifest, cuda_version) + GenerateSM90_TensorOp_int8_WGMMA_gemm(manifest, cuda_version) + GenerateSM90_TensorOp_fp8_WGMMA_gemm(manifest, cuda_version) + GenerateSM90_TensorOp_1684(manifest, cuda_version) + GenerateSM90_TensorOp_1684_complex(manifest, cuda_version) + GenerateSM90_TensorOp_1684_complex_gaussian(manifest, cuda_version) + GenerateSM90_TensorOp_1684_rank_k(manifest, cuda_version) + 
GenerateSM90_TensorOp_1684_rank_k_complex(manifest, cuda_version) + GenerateSM90_TensorOp_1684_rank_k_complex_gaussian(manifest, cuda_version) + GenerateSM90_TensorOp_1684_trmm(manifest, cuda_version) + GenerateSM90_TensorOp_1684_trmm_complex(manifest, cuda_version) + GenerateSM90_TensorOp_1684_trmm_complex_gaussian(manifest, cuda_version) + GenerateSM90_TensorOp_1684_symm(manifest, cuda_version) + GenerateSM90_TensorOp_1684_symm_complex(manifest, cuda_version) + GenerateSM90_TensorOp_1684_symm_complex_gaussian(manifest, cuda_version) + +################################################################################################### + +def numeric_log_level(log_level: str) -> int: + """ + Converts the string identifier of the log level into the numeric identifier used + in setting the log level + + :param x: string representation of log level (e.g., 'INFO', 'DEBUG') + :type x: str + + :return: numeric representation of log level + :rtype: int + """ + numeric_level = getattr(logging, log_level.upper(), None) + if not isinstance(numeric_level, int): + raise ValueError(f'Invalid log level: {log_level}') + return numeric_level + + +# This function for defining the ArgumentParser is used to make it easy for the CUTLASS Python interface +# to leverage the functionality in this file without running this script via a shell prompt. +def define_parser(): + parser = argparse.ArgumentParser(description="Generates device kernel registration code for CUTLASS Kernels") + parser.add_argument("--operations", default="all", help="Specifies the operation to generate (gemm, all)") + parser.add_argument("--build-dir", default=".", required=False, help="CUTLASS top-level build directory") + parser.add_argument("--curr-build-dir", default=".", help="CUTLASS current build directory. 
cmake files will be emitted in this directory") + parser.add_argument("--generator-target", default='library', help="Target of CUTLASS Library Generator.") + parser.add_argument("--architectures", default='53;60;61;70;75;80;90', help="Target compute architectures") + parser.add_argument("--kernels", default='', help='Comma delimited list to filter kernels by name.') + parser.add_argument("--ignore-kernels", default='', help='Comma delimited list of kernels to exclude from build.') + parser.add_argument("--filter-by-cc", default='True', type=str, help='If enabled, kernels whose compute capability range is not satisfied by the build target are excluded.') + parser.add_argument("--cuda-version", default="11.0.0", help="Semantic version string of CUDA Toolkit") + parser.add_argument('--kernel-filter-file', type=str, default=None, required=False, help='Full path of filter file') + parser.add_argument('--selected-kernel-list', type=str, default=None, required=False, + help='Specify the output log file containing all enabled kernels in this build') + parser.add_argument("--interface-dir", default=None, required=False, help="Interface header to kernels") + parser.add_argument("--disable-full-archs-compilation", action="store_true", required=False, help="Disable compilation for every archs in --architectures") + parser.add_argument("--log-level", default='info', type=numeric_log_level, required=False, + help='Logging level to be used by the generator script') + return parser + + +if __name__ == "__main__": + parser = define_parser() + args = parser.parse_args() + + # Set the logging level based on the user-provided `--log-level` command-line option + logging.basicConfig(level=args.log_level) + + manifest = Manifest(args) + + GenerateSM50(manifest, args.cuda_version) + GenerateSM60(manifest, args.cuda_version) + GenerateSM61(manifest, args.cuda_version) + GenerateSM70(manifest, args.cuda_version) + GenerateSM75(manifest, args.cuda_version) + GenerateSM80(manifest, args.cuda_version) + GenerateSM90(manifest, args.cuda_version) + if 'library' in args.generator_target.split(','): + manifest.emit(GeneratorTarget.Library) + + if args.selected_kernel_list is not None: + if len(manifest.selected_kernels) > 0: + with open(args.selected_kernel_list, 'w') as file_writer: + for line in manifest.selected_kernels: + file_writer.write("%s\n" % line) + +################################################################################################### diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/library.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/library.py new file mode 100644 index 0000000000000000000000000000000000000000..a1d75c21e329c2bf4f0a92d4ce5788fed565248b --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/library.py @@ -0,0 +1,990 @@ +################################################################################################# +# +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. 
Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################################# + +""" +Data types and tags used for emitting CUTLASS C++ kernels +""" + +import enum +import re + +# The following block implements enum.auto() for Python 3.5 variants that don't include it such +# as the default 3.5.2 on Ubuntu 16.04. +# +# https://codereview.stackexchange.com/questions/177309/reimplementing-pythons-enum-auto-for-compatibility + +try: + from enum import auto as enum_auto +except ImportError: + __cutlass_library_auto_enum = 0 + def enum_auto() -> int: + global __cutlass_library_auto_enum + i = __cutlass_library_auto_enum + __cutlass_library_auto_enum += 1 + return i + +################################################################################################### + +# +class GeneratorTarget(enum.Enum): + Library = enum_auto() +# +GeneratorTargetNames = { + GeneratorTarget.Library: 'library' +} +# + +################################################################################################### + +# +class DataType(enum.Enum): + void = enum_auto() # primarily used to disable C tensor for epilogues + b1 = enum_auto() + u4 = enum_auto() + u8 = enum_auto() + u16 = enum_auto() + u32 = enum_auto() + u64 = enum_auto() + s4 = enum_auto() + s8 = enum_auto() + s16 = enum_auto() + s32 = enum_auto() + s64 = enum_auto() + e4m3 = enum_auto() + e5m2 = enum_auto() + f16 = enum_auto() + bf16 = enum_auto() + f32 = enum_auto() + tf32 = enum_auto() + f64 = enum_auto() + cf16 = enum_auto() + cbf16 = enum_auto() + cf32 = enum_auto() + ctf32 = enum_auto() + cf64 = enum_auto() + cs4 = enum_auto() + cs8 = enum_auto() + cs16 = enum_auto() + cs32 = enum_auto() + cs64 = enum_auto() + cu4 = enum_auto() + cu8 = enum_auto() + cu16 = enum_auto() + cu32 = enum_auto() + cu64 = enum_auto() + invalid = enum_auto() + +# +ShortDataTypeNames = { + DataType.s32: 'i', + DataType.e4m3: 'e4m3', + DataType.e5m2: 'e5m2', + DataType.f16: 'h', + DataType.f32: 's', + DataType.f64: 'd', + DataType.cf32: 'c', + DataType.cf64: 'z', +} + +# +DataTypeNames = { + DataType.void: "void", + DataType.b1: "b1", + DataType.u4: "u4", + DataType.u8: "u8", + DataType.u16: "u16", + DataType.u32: "u32", + DataType.u64: "u64", + DataType.s4: "s4", + DataType.s8: "s8", + DataType.s16: "s16", + DataType.s32: "s32", + DataType.s64: "s64", + 
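# --- Editor's illustrative annotation (not part of the patch) -----------------------------------
# ShortDataTypeNames above follows the BLAS-style single-letter convention (h = f16, s = f32,
# d = f64, c = complex<float>, z = complex<double>); operation classes later in this patch build
# their "short math name" from it, prefixing 'g' for the Gaussian complex multiply-add. A hedged
# sketch of that derivation, assuming the names defined in this file are in scope:
#
#   accum = DataType.cf64
#   ShortDataTypeNames[accum]          # -> 'z'
#   'g' + ShortDataTypeNames[accum]    # -> 'gz', used when the math op is
#                                      #    MathOperation.multiply_add_complex_gaussian
# -------------------------------------------------------------------------------------------------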
DataType.e4m3: 'e4m3', + DataType.e5m2: 'e5m2', + DataType.f16: "f16", + DataType.bf16: "bf16", + DataType.f32: "f32", + DataType.tf32: "tf32", + DataType.f64: "f64", + DataType.cf16: "cf16", + DataType.cbf16: "cbf16", + DataType.cf32: "cf32", + DataType.ctf32: "ctf32", + DataType.cf64: "cf64", + DataType.cu4: "cu4", + DataType.cu8: "cu8", + DataType.cu16: "cu16", + DataType.cu32: "cu32", + DataType.cu64: "cu64", + DataType.cs4: "cs4", + DataType.cs8: "cs8", + DataType.cs16: "cs16", + DataType.cs32: "cs32", + DataType.cs64: "cs64", +} + +DataTypeTag = { + DataType.void: "void", + DataType.b1: "cutlass::uint1b_t", + DataType.u4: "cutlass::uint4b_t", + DataType.u8: "uint8_t", + DataType.u16: "uint16_t", + DataType.u32: "uint32_t", + DataType.u64: "uint64_t", + DataType.s4: "cutlass::int4b_t", + DataType.s8: "int8_t", + DataType.s16: "int16_t", + DataType.s32: "int32_t", + DataType.s64: "int64_t", + DataType.e4m3: 'cutlass::float_e4m3_t', + DataType.e5m2: 'cutlass::float_e5m2_t', + DataType.f16: "cutlass::half_t", + DataType.bf16: "cutlass::bfloat16_t", + DataType.f32: "float", + DataType.tf32: "cutlass::tfloat32_t", + DataType.f64: "double", + DataType.cf16: "cutlass::complex", + DataType.cbf16: "cutlass::complex", + DataType.cf32: "cutlass::complex", + DataType.ctf32: "cutlass::complex", + DataType.cf64: "cutlass::complex", + DataType.cu4: "cutlass::complex", + DataType.cu8: "cutlass::complex", + DataType.cu16: "cutlass::complex", + DataType.cu32: "cutlass::complex", + DataType.cu64: "cutlass::complex", + DataType.cs4: "cutlass::complex", + DataType.cs8: "cutlass::complex", + DataType.cs16: "cutlass::complex", + DataType.cs32: "cutlass::complex", + DataType.cs64: "cutlass::complex", +} + +DataTypeSize = { + DataType.void: 0, + DataType.b1: 1, + DataType.u4: 4, + DataType.u8: 8, + DataType.u16: 16, + DataType.u32: 32, + DataType.u64: 64, + DataType.s4: 4, + DataType.s8: 8, + DataType.s16: 16, + DataType.s32: 32, + DataType.s64: 64, + DataType.e4m3: 8, + DataType.e5m2: 8, + DataType.f16: 16, + DataType.bf16: 16, + DataType.f32: 32, + DataType.tf32: 32, + DataType.f64: 64, + DataType.cf16: 32, + DataType.cbf16: 32, + DataType.cf32: 64, + DataType.ctf32: 32, + DataType.cf64: 128, + DataType.cu4: 8, + DataType.cu8: 16, + DataType.cu16: 32, + DataType.cu32: 64, + DataType.cu64: 128, + DataType.cs4: 8, + DataType.cs8: 16, + DataType.cs16: 32, + DataType.cs32: 64, + DataType.cs64: 128, +} + +################################################################################################### +# +class BlasMode(enum.Enum): + symmetric = enum_auto() + hermitian = enum_auto() + +# +BlasModeTag = { + BlasMode.symmetric: 'cutlass::BlasMode::kSymmetric', + BlasMode.hermitian: 'cutlass::BlasMode::kHermitian', +} + +# +class ComplexTransform(enum.Enum): + none = enum_auto() + conj = enum_auto() + +# +ComplexTransformTag = { + ComplexTransform.none: 'cutlass::ComplexTransform::kNone', + ComplexTransform.conj: 'cutlass::ComplexTransform::kConjugate', +} + +# +RealComplexBijection = [ + (DataType.f16, DataType.cf16), + (DataType.f32, DataType.cf32), + (DataType.f64, DataType.cf64), +] + +# +def is_complex(data_type): + for r, c in RealComplexBijection: + if data_type == c: + return True + return False + +# +def get_complex_from_real(real_type): + for r, c in RealComplexBijection: + if real_type == r: + return c + return DataType.invalid + +# +def get_real_from_complex(complex_type): + for r, c in RealComplexBijection: + if complex_type == c: + return r + return DataType.invalid + +# +class 
ComplexMultiplyOp(enum.Enum): + multiply_add = enum_auto() + gaussian = enum_auto() + +################################################################################################### + +# +class MathOperation(enum.Enum): + multiply_add = enum_auto() + multiply_add_saturate = enum_auto() + xor_popc = enum_auto() + and_popc = enum_auto() + multiply_add_fast_bf16 = enum_auto() + multiply_add_fast_f16 = enum_auto() + multiply_add_fast_f32 = enum_auto() + multiply_add_complex_fast_f32 = enum_auto() + multiply_add_complex = enum_auto() + multiply_add_complex_gaussian = enum_auto() + +# +MathOperationTag = { + MathOperation.multiply_add: 'cutlass::arch::OpMultiplyAdd', + MathOperation.multiply_add_saturate: 'cutlass::arch::OpMultiplyAddSaturate', + MathOperation.xor_popc: 'cutlass::arch::OpXorPopc', + MathOperation.and_popc: 'cutlass::arch::OpAndPopc', + MathOperation.multiply_add_fast_bf16: 'cutlass::arch::OpMultiplyAddFastBF16', + MathOperation.multiply_add_fast_f16: 'cutlass::arch::OpMultiplyAddFastF16', + MathOperation.multiply_add_fast_f32: 'cutlass::arch::OpMultiplyAddFastF32', + MathOperation.multiply_add_complex_fast_f32: 'cutlass::arch::OpMultiplyAddComplexFastF32', + MathOperation.multiply_add_complex: 'cutlass::arch::OpMultiplyAddComplex', + MathOperation.multiply_add_complex_gaussian: 'cutlass::arch::OpMultiplyAddGaussianComplex', +} + +################################################################################################### + +# +class LayoutType(enum.Enum): + ColumnMajor = enum_auto() + RowMajor = enum_auto() + ColumnMajorInterleaved2 = enum_auto() + RowMajorInterleaved2 = enum_auto() + ColumnMajorInterleaved32 = enum_auto() + RowMajorInterleaved32 = enum_auto() + ColumnMajorInterleaved64 = enum_auto() + RowMajorInterleaved64 = enum_auto() + TensorNHWC = enum_auto() + TensorNDHWC = enum_auto() + TensorNCHW = enum_auto() + TensorNGHWC = enum_auto() + TensorNC32HW32 = enum_auto() + TensorNC64HW64 = enum_auto() + TensorC32RSK32 = enum_auto() + TensorC64RSK64 = enum_auto() + +# +LayoutTag = { + LayoutType.ColumnMajor: 'cutlass::layout::ColumnMajor', + LayoutType.RowMajor: 'cutlass::layout::RowMajor', + LayoutType.ColumnMajorInterleaved2: 'cutlass::layout::ColumnMajorInterleaved<2>', + LayoutType.RowMajorInterleaved2: 'cutlass::layout::RowMajorInterleaved<2>', + LayoutType.ColumnMajorInterleaved32: 'cutlass::layout::ColumnMajorInterleaved<32>', + LayoutType.RowMajorInterleaved32: 'cutlass::layout::RowMajorInterleaved<32>', + LayoutType.ColumnMajorInterleaved64: 'cutlass::layout::ColumnMajorInterleaved<64>', + LayoutType.RowMajorInterleaved64: 'cutlass::layout::RowMajorInterleaved<64>', + LayoutType.TensorNHWC: 'cutlass::layout::TensorNHWC', + LayoutType.TensorNDHWC: 'cutlass::layout::TensorNDHWC', + LayoutType.TensorNCHW: 'cutlass::layout::TensorNCHW', + LayoutType.TensorNGHWC: 'cutlass::layout::TensorNGHWC', + LayoutType.TensorNC32HW32: 'cutlass::layout::TensorNCxHWx<32>', + LayoutType.TensorC32RSK32: 'cutlass::layout::TensorCxRSKx<32>', + LayoutType.TensorNC64HW64: 'cutlass::layout::TensorNCxHWx<64>', + LayoutType.TensorC64RSK64: 'cutlass::layout::TensorCxRSKx<64>', +} + +# +TransposedLayout = { + LayoutType.ColumnMajor: LayoutType.RowMajor, + LayoutType.RowMajor: LayoutType.ColumnMajor, + LayoutType.ColumnMajorInterleaved2: LayoutType.RowMajorInterleaved2, + LayoutType.RowMajorInterleaved2: LayoutType.ColumnMajorInterleaved2, + LayoutType.ColumnMajorInterleaved32: LayoutType.RowMajorInterleaved32, + LayoutType.RowMajorInterleaved32: LayoutType.ColumnMajorInterleaved32, + 
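# --- Editor's illustrative annotation (not part of the patch) -----------------------------------
# The *Tag dictionaries collected above translate generator-side enums into the C++ identifiers
# that get substituted into emitted kernel instantiations, while DataTypeSize records element
# widths in bits. A hedged usage sketch, assuming the names defined earlier in this file are in
# scope:
assert DataTypeTag[DataType.f64] == "double"
assert LayoutTag[LayoutType.ColumnMajor] == "cutlass::layout::ColumnMajor"
assert MathOperationTag[MathOperation.multiply_add_complex_gaussian] == "cutlass::arch::OpMultiplyAddGaussianComplex"
assert DataTypeSize[DataType.cf64] == 128   # complex<double> occupies 128 bits per element
# -------------------------------------------------------------------------------------------------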
LayoutType.ColumnMajorInterleaved64: LayoutType.RowMajorInterleaved64, + LayoutType.RowMajorInterleaved64: LayoutType.ColumnMajorInterleaved64, + LayoutType.TensorNHWC: LayoutType.TensorNHWC +} + +# +ShortLayoutTypeNames = { + LayoutType.ColumnMajor: 'n', + LayoutType.ColumnMajorInterleaved2: 'n2', + LayoutType.ColumnMajorInterleaved32: 'n32', + LayoutType.ColumnMajorInterleaved64: 'n64', + LayoutType.RowMajor: 't', + LayoutType.RowMajorInterleaved2: 't2', + LayoutType.RowMajorInterleaved32: 't32', + LayoutType.RowMajorInterleaved64: 't64', + LayoutType.TensorNHWC: 'nhwc', + LayoutType.TensorNDHWC: 'ndhwc', + LayoutType.TensorNCHW: 'nchw', + LayoutType.TensorNGHWC: 'nghwc', + LayoutType.TensorNC32HW32: 'nc32hw32', + LayoutType.TensorNC64HW64: 'nc64hw64', + LayoutType.TensorC32RSK32: 'c32rsk32', + LayoutType.TensorC64RSK64: 'c64rsk64' +} + +# +ShortComplexLayoutNames = { + (LayoutType.ColumnMajor, ComplexTransform.none): 'n', + (LayoutType.ColumnMajor, ComplexTransform.conj): 'c', + (LayoutType.RowMajor, ComplexTransform.none): 't', + (LayoutType.RowMajor, ComplexTransform.conj): 'h' +} + +################################################################################################### +class KernelScheduleType(enum.Enum): + ScheduleAuto = enum_auto() + Multistage = enum_auto() + Tma = enum_auto() + TmaWarpSpecialized = enum_auto() + TmaWarpSpecializedPingpong = enum_auto() + TmaWarpSpecializedCooperative = enum_auto() + TmaWarpSpecializedFP8FastAccum = enum_auto() + TmaWarpSpecializedCooperativeFP8FastAccum = enum_auto() + TmaWarpSpecializedPingpongFP8FastAccum = enum_auto() +# +KernelScheduleTag = { + KernelScheduleType.ScheduleAuto: 'cutlass::gemm::collective::KernelScheduleAuto', + KernelScheduleType.Multistage: 'cutlass::gemm::KernelMultistage', + KernelScheduleType.Tma: 'cutlass::gemm::KernelTma', + KernelScheduleType.TmaWarpSpecialized: 'cutlass::gemm::KernelTmaWarpSpecialized', + KernelScheduleType.TmaWarpSpecializedPingpong: 'cutlass::gemm::KernelTmaWarpSpecializedPingpong', + KernelScheduleType.TmaWarpSpecializedCooperative: 'cutlass::gemm::KernelTmaWarpSpecializedCooperative', + KernelScheduleType.TmaWarpSpecializedFP8FastAccum: 'cutlass::gemm::KernelTmaWarpSpecializedFP8FastAccum', + KernelScheduleType.TmaWarpSpecializedCooperativeFP8FastAccum: 'cutlass::gemm::KernelTmaWarpSpecializedCooperativeFP8FastAccum', + KernelScheduleType.TmaWarpSpecializedPingpongFP8FastAccum: 'cutlass::gemm::KernelTmaWarpSpecializedPingpongFP8FastAccum', +} + +# +KernelScheduleSuffixes = { + KernelScheduleType.ScheduleAuto: '', + KernelScheduleType.Multistage: '_cpasync', + KernelScheduleType.Tma: '_unspecialized', + KernelScheduleType.TmaWarpSpecialized: '_warpspecialized', + KernelScheduleType.TmaWarpSpecializedPingpong: '_warpspecialized_pingpong', + KernelScheduleType.TmaWarpSpecializedCooperative: '_warpspecialized_cooperative', + KernelScheduleType.TmaWarpSpecializedFP8FastAccum: '_warpspecialized_fp8_fastaccum', + KernelScheduleType.TmaWarpSpecializedCooperativeFP8FastAccum: '_warpspecialized_cooperative_fp8_fastaccum', + KernelScheduleType.TmaWarpSpecializedPingpongFP8FastAccum: '_warpspecialized_pingpong_fp8_fastaccum', +} + +class EpilogueScheduleType(enum.Enum): + ScheduleAuto = enum_auto() + EpilogueTransposed = enum_auto() + NoSmemWarpSpecialized = enum_auto() + TmaWarpSpecialized = enum_auto() + TmaWarpSpecializedCooperative = enum_auto() +# +EpilogueScheduleTag = { + EpilogueScheduleType.ScheduleAuto: 'cutlass::epilogue::collective::EpilogueScheduleAuto', + 
EpilogueScheduleType.EpilogueTransposed: 'cutlass::gemm::EpilogueTransposed', + EpilogueScheduleType.NoSmemWarpSpecialized: 'cutlass::epilogue::NoSmemWarpSpecialized', + EpilogueScheduleType.TmaWarpSpecialized: 'cutlass::epilogue::TmaWarpSpecialized', + EpilogueScheduleType.TmaWarpSpecializedCooperative: 'cutlass::epilogue::TmaWarpSpecializedCooperative', +} + +# +EpilogueScheduleSuffixes = { + EpilogueScheduleType.ScheduleAuto: '', + EpilogueScheduleType.EpilogueTransposed: '', + EpilogueScheduleType.NoSmemWarpSpecialized: '_epi_nosmem', + EpilogueScheduleType.TmaWarpSpecialized: '_epi_tma', + EpilogueScheduleType.TmaWarpSpecializedCooperative: '_epi_tma', +} + +class TileSchedulerType(enum.Enum): + Default = enum_auto() + Persistent = enum_auto() + StreamK = enum_auto() +# +TileSchedulerTag = { + TileSchedulerType.Default: 'void', + TileSchedulerType.Persistent: 'cutlass::gemm::PersistentScheduler', + TileSchedulerType.StreamK: 'cutlass::gemm::StreamKScheduler', +} + +# +TileSchedulerSuffixes = { + TileSchedulerType.Default: '', + TileSchedulerType.Persistent: '', + TileSchedulerType.StreamK: '_stream_k', +} + +################################################################################################### + +# +class SideMode(enum.Enum): + Left = enum_auto() + Right = enum_auto() + +# +SideModeTag = { + SideMode.Left: 'cutlass::SideMode::kLeft', + SideMode.Right: 'cutlass::SideMode::kRight' +} + +# +ShortSideModeNames = { + SideMode.Left: 'ls', + SideMode.Right: 'rs' +} + +################################################################################################### + +# +class FillMode(enum.Enum): + Lower = enum_auto() + Upper = enum_auto() + +# +FillModeTag = { + FillMode.Lower: 'cutlass::FillMode::kLower', + FillMode.Upper: 'cutlass::FillMode::kUpper' +} + +# +ShortFillModeNames = { + FillMode.Lower: 'l', + FillMode.Upper: 'u' +} + +################################################################################################### + +# +class DiagType(enum.Enum): + NonUnit = enum_auto() + Unit = enum_auto() + +# +DiagTypeTag = { + DiagType.NonUnit: 'cutlass::DiagType::kNonUnit', + DiagType.Unit: 'cutlass::DiagType::kUnit' +} + +# +ShortDiagTypeNames = { + DiagType.NonUnit: 'nu', + DiagType.Unit: 'un' +} + +################################################################################################### + +# +class OpcodeClass(enum.Enum): + Simt = enum_auto() + TensorOp = enum_auto() + WmmaTensorOp = enum_auto() + SparseTensorOp = enum_auto() + + +OpcodeClassNames = { + OpcodeClass.Simt: 'simt', + OpcodeClass.TensorOp: 'tensorop', + OpcodeClass.WmmaTensorOp: 'wmma_tensorop', +} + +OpcodeClassTag = { + OpcodeClass.Simt: 'cutlass::arch::OpClassSimt', + OpcodeClass.TensorOp: 'cutlass::arch::OpClassTensorOp', + OpcodeClass.WmmaTensorOp: 'cutlass::arch::OpClassWmmaTensorOp', +} + +################################################################################################### + +# +class OperationKind(enum.Enum): + Gemm = enum_auto() + RankK = enum_auto() + Rank2K = enum_auto() + Trmm = enum_auto() + Symm = enum_auto() + Conv2d = enum_auto() + Conv3d = enum_auto() + +# +OperationKindNames = { + OperationKind.Gemm: 'gemm' + , OperationKind.RankK: 'rank_k' + , OperationKind.Rank2K: 'rank_2k' + , OperationKind.Trmm: 'trmm' + , OperationKind.Symm: 'symm' + , OperationKind.Conv2d: 'conv2d' + , OperationKind.Conv3d: 'conv3d' +} + +# +class Target(enum.Enum): + library = enum_auto() +# +ArchitectureNames = { + 50: 'maxwell', + 60: 'pascal', + 61: 'pascal', + 70: 'volta', + 75: 
'turing', + 80: 'ampere', + 89: 'ada', + 90: 'hopper' +} + +# +SharedMemPerCC = { + 70: 96, # 96KB of SMEM + 72: 96, # 96KB of SMEM + 75: 64, # 64KB of SMEM + 80: 163, # 163KB of SMEM - 1KB reserved for the driver + 86: 99, # 99KB of SMEM - 1KB reserved for the driver + 87: 163, # 163KB of SMEM - 1KB reserved for the driver + 89: 99, # 99KB of SMEM - 1KB reserved for the driver + 90: 227, # 227KB of SMEM - 1KB reserved for the driver +} + +################################################################################################### + +# +def SubstituteTemplate(template, values): + text = template + changed = True + while changed: + changed = False + for key, value in values.items(): + regex = "\\$\\{%s\\}" % key + newtext = re.sub(regex, value, text) + if newtext != text: + changed = True + text = newtext + return text + +################################################################################################### + +# +class GemmKind(enum.Enum): + Gemm = enum_auto() + Sparse = enum_auto() + Universal = enum_auto() + Universal3x = enum_auto() + PlanarComplex = enum_auto() + PlanarComplexArray = enum_auto() + Grouped = enum_auto() + +# +GemmKindNames = { + GemmKind.Gemm: "gemm", + GemmKind.Sparse: "spgemm", + GemmKind.Universal: "gemm", + GemmKind.Universal3x: "gemm", + GemmKind.PlanarComplex: "gemm_planar_complex", + GemmKind.PlanarComplexArray: "gemm_planar_complex_array", + GemmKind.Grouped: "gemm_grouped" +} + +# +class RankKKind(enum.Enum): + Universal = enum_auto() + +# +RankKKindNames = { + RankKKind.Universal: "rank_k" +} + +# +class TrmmKind(enum.Enum): + Universal = enum_auto() + +# +TrmmKindNames = { + TrmmKind.Universal: "trmm" +} + +# +class SymmKind(enum.Enum): + Universal = enum_auto() + +# +SymmKindNames = { + SymmKind.Universal: "symm" +} + +# +class EpilogueFunctor(enum.Enum): + LinearCombination = enum_auto() + LinearCombinationClamp = enum_auto() + +# +EpilogueFunctorTag = { + EpilogueFunctor.LinearCombination: 'cutlass::epilogue::thread::LinearCombination', + EpilogueFunctor.LinearCombinationClamp: 'cutlass::epilogue::thread::LinearCombinationClamp', +} + +# +class SwizzlingFunctor(enum.Enum): + Identity1 = enum_auto() + Identity2 = enum_auto() + Identity4 = enum_auto() + Identity8 = enum_auto() + Horizontal = enum_auto() + StridedDgradIdentity1 = enum_auto() + StridedDgradIdentity4 = enum_auto() + StridedDgradHorizontal = enum_auto() + StreamK = enum_auto() + +# +SwizzlingFunctorTag = { + SwizzlingFunctor.Identity1: 'cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>', + SwizzlingFunctor.Identity2: 'cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<2>', + SwizzlingFunctor.Identity4: 'cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<4>', + SwizzlingFunctor.Identity8: 'cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>', + SwizzlingFunctor.Horizontal: 'cutlass::gemm::threadblock::GemmHorizontalThreadblockSwizzle', + SwizzlingFunctor.StridedDgradIdentity1: 'cutlass::conv::threadblock::StridedDgradIdentityThreadblockSwizzle<1>', + SwizzlingFunctor.StridedDgradIdentity4: 'cutlass::conv::threadblock::StridedDgradIdentityThreadblockSwizzle<4>', + SwizzlingFunctor.StridedDgradHorizontal: 'cutlass::conv::threadblock::StridedDgradHorizontalThreadblockSwizzle', + SwizzlingFunctor.StreamK: 'cutlass::gemm::threadblock::ThreadblockSwizzleStreamK', +} + +# +class GroupScheduleMode(enum.Enum): + Device = enum_auto(), + Host = enum_auto() + +# +GroupScheduleModeTag = { + GroupScheduleMode.Device: 
'cutlass::gemm::kernel::GroupScheduleMode::kDeviceOnly', + GroupScheduleMode.Host: 'cutlass::gemm::kernel::GroupScheduleMode::kHostPrecompute' +} + +# +ShortGroupScheduleModeNames = { + GroupScheduleMode.Device: 'Device', + GroupScheduleMode.Host: 'Host' +} + +################################################################################################### + +# +class ConvKind(enum.IntEnum): + Fprop = 0 + Dgrad = 1 + Wgrad = 2 + +# +ConvKindTag = { + ConvKind.Fprop: 'cutlass::conv::Operator::kFprop', + ConvKind.Dgrad: 'cutlass::conv::Operator::kDgrad', + ConvKind.Wgrad: 'cutlass::conv::Operator::kWgrad' +} + +ConvKindNames = { + ConvKind.Fprop: 'fprop', + ConvKind.Dgrad: 'dgrad', + ConvKind.Wgrad: 'wgrad', +} + +class ConvMode(enum.IntEnum): + CrossCorrelation = 0 + Convolution = 1 + +# +class IteratorAlgorithm(enum.Enum): + Analytic = 0 + Optimized = 1 + FixedChannels = 2 + FewChannels = 3 + FixedStrideDilation = 4 + +# +IteratorAlgorithmTag = { + IteratorAlgorithm.Analytic: 'cutlass::conv::IteratorAlgorithm::kAnalytic', + IteratorAlgorithm.Optimized: 'cutlass::conv::IteratorAlgorithm::kOptimized', + IteratorAlgorithm.FixedChannels: 'cutlass::conv::IteratorAlgorithm::kFixedChannels', + IteratorAlgorithm.FewChannels: 'cutlass::conv::IteratorAlgorithm::kFewChannels', + IteratorAlgorithm.FixedStrideDilation: 'cutlass::conv::IteratorAlgorithm::kFixedStrideDilation' +} + +IteratorAlgorithmNames = { + IteratorAlgorithm.Analytic: 'analytic', + IteratorAlgorithm.Optimized: 'optimized', + IteratorAlgorithm.FixedChannels: 'fixed_channels', + IteratorAlgorithm.FewChannels: 'few_channels', + IteratorAlgorithm.FixedStrideDilation: 'fixed_stride_dilation' +} + +# +class StrideSupport(enum.Enum): + Strided = 0 + Unity = 1 + Fixed = 2 + +# +StrideSupportTag = { + StrideSupport.Strided: 'cutlass::conv::StrideSupport::kStrided', + StrideSupport.Unity: 'cutlass::conv::StrideSupport::kUnity', + StrideSupport.Fixed: 'cutlass::conv::StrideSupport::kFixed' +} + +StrideSupportNames = { + StrideSupport.Strided: '', + StrideSupport.Unity: 'unity_stride', + StrideSupport.Fixed: 'fixed_stride' +} + +# +class GroupMode(enum.Enum): + NoneGroup = enum_auto() # dense conv (G=1) + SingleGroup = enum_auto() # grouped convolution (single group per CTA) + MultipleGroup = enum_auto() # grouped convolution ( multiple groups per CTA) + Depthwise = enum_auto() # Depthwise convolution ( C=K=G ) + +# +GroupModeTag = { + GroupMode.NoneGroup: 'cutlass::conv::GroupMode::kNone', + GroupMode.SingleGroup: 'cutlass::conv::GroupMode::kSingleGroup', + GroupMode.MultipleGroup: 'cutlass::conv::GroupMode::kMultipleGroup', + GroupMode.Depthwise: 'cutlass::conv::GroupMode::kDepthwise', +} + +GroupModeNames = { + GroupMode.NoneGroup: '', + GroupMode.SingleGroup: 'single_group', + GroupMode.MultipleGroup: 'multiple_group', + GroupMode.Depthwise: 'depthwise', +} + +################################################################################################### + +# +class MathInstruction: + def __init__(self, instruction_shape, element_a, element_b, element_accumulator, opcode_class, math_operation = MathOperation.multiply_add): + self.instruction_shape = instruction_shape + self.element_a = element_a + self.element_b = element_b + self.element_accumulator = element_accumulator + self.opcode_class = opcode_class + self.math_operation = math_operation + +# +class TileDescription: + + def __init__(self, threadblock_shape, stages, warp_count, math_instruction, min_compute, max_compute, cluster_shape = [1,1,1]): + self.threadblock_shape = 
threadblock_shape + self.tile_shape = threadblock_shape + self.stages = stages + self.warp_count = warp_count + self.math_instruction = math_instruction + self.minimum_compute_capability = min_compute + self.maximum_compute_capability = max_compute + self.cluster_shape = cluster_shape + + def procedural_name(self): + if self.minimum_compute_capability >= 90: + return "{tbm}x{tbn}x{tbk}_{cm}x{cn}x{ck}_{s}".format( + tbm = self.threadblock_shape[0], + tbn = self.threadblock_shape[1], + tbk = self.threadblock_shape[2], + cm = self.cluster_shape[0], + cn = self.cluster_shape[1], + ck = self.cluster_shape[2], + s = self.stages) + else: + return "%dx%d_%dx%d" % (self.threadblock_shape[0], self.threadblock_shape[1], self.threadblock_shape[2], self.stages) + +# +class Direct2dConvFixedStrideDilationTileDescription: + def __init__(self, threadblock_output_shape, filter_shape, stages, stride, dilation, warp_count, math_instruction, min_compute, max_compute): + self.threadblock_shape = [threadblock_output_shape[0]*threadblock_output_shape[1]*threadblock_output_shape[2], threadblock_output_shape[3], filter_shape[0]*filter_shape[1]] + self.threadblock_output_shape = threadblock_output_shape + self.filter_shape = filter_shape + self.stages = stages + self.warp_count = warp_count + self.stride = stride + self.dilation = dilation + self.math_instruction = math_instruction + self.minimum_compute_capability = min_compute + self.maximum_compute_capability = max_compute + + def procedural_name(self): + str_name = "%dx%dx%d_%dx%dx%dx%d_%d_filter%dx%d" % (self.threadblock_shape[0], + self.threadblock_shape[1], + self.threadblock_shape[2], + self.threadblock_output_shape[0], + self.threadblock_output_shape[1], + self.threadblock_output_shape[2], + self.threadblock_output_shape[3], + self.stages, + self.filter_shape[0], + self.filter_shape[1]) + # Fixed Strided and dilation + if self.stride != [-1, -1] and self.dilation != [-1, -1]: + str_name += "_stride%dx%d_dilation%dx%d" % (self.stride[0], + self.stride[1], + self.dilation[0], + self.dilation[1]) + return str_name + +# +class Direct2dConvFixedStrideDilationTileDescription: + def __init__(self, threadblock_output_shape, filter_shape, stages, stride, dilation, warp_count, math_instruction, min_compute, max_compute): + self.threadblock_shape = [threadblock_output_shape[0]*threadblock_output_shape[1]*threadblock_output_shape[2], threadblock_output_shape[3], filter_shape[0]*filter_shape[1]] + self.threadblock_output_shape = threadblock_output_shape + self.filter_shape = filter_shape + self.stages = stages + self.warp_count = warp_count + self.stride = stride + self.dilation = dilation + self.math_instruction = math_instruction + self.minimum_compute_capability = min_compute + self.maximum_compute_capability = max_compute + + def procedural_name(self): + str_name = "%dx%dx%d_%dx%dx%dx%d_%d_filter%dx%d" % (self.threadblock_shape[0], + self.threadblock_shape[1], + self.threadblock_shape[2], + self.threadblock_output_shape[0], + self.threadblock_output_shape[1], + self.threadblock_output_shape[2], + self.threadblock_output_shape[3], + self.stages, + self.filter_shape[0], + self.filter_shape[1]) + # Fixed Strided and dilation + if self.stride != [-1, -1] and self.dilation != [-1, -1]: + str_name += "_stride%dx%d_dilation%dx%d" % (self.stride[0], + self.stride[1], + self.dilation[0], + self.dilation[1]) + return str_name + +# +class TensorDescription: + def __init__(self, element, layout, alignment = 1, complex_transform = ComplexTransform.none): + self.element = 
element + self.layout = layout + self.alignment = alignment + self.complex_transform = complex_transform + +# +class SymmetricTensorDescription: + def __init__(self, element, layout, fill_mode, alignment = 1, complex_transform = ComplexTransform.none, side_mode = SideMode.Left): + self.element = element + self.layout = layout + self.fill_mode = fill_mode + self.alignment = alignment + self.complex_transform = complex_transform + self.side_mode = side_mode + +# +class TriangularTensorDescription: + def __init__(self, element, layout, side_mode, fill_mode, diag_type, alignment = 1, complex_transform = ComplexTransform.none): + self.element = element + self.layout = layout + self.side_mode = side_mode + self.fill_mode = fill_mode + self.diag_type = diag_type + self.alignment = alignment + self.complex_transform = complex_transform + +# +def CalculateSmemUsage(operation): + cta_shape = operation.tile_description.threadblock_shape + stages = operation.tile_description.stages + + if operation.operation_kind == OperationKind.Gemm and operation.gemm_kind == GemmKind.Sparse: + # Elements represented by 8 bits of metadata (based on 4:8, 2:4 or 1:2 sparsity) + if DataTypeSize[operation.A.element] == 32: + elements_per_8b_md = 2 + elif DataTypeSize[operation.A.element] == 4: + elements_per_8b_md = 8 + else: + elements_per_8b_md = 4 + + smem_per_stage = DataTypeSize[operation.A.element] * cta_shape[0] * (cta_shape[2] // 2) // 8 + \ + DataTypeSize[operation.B.element] * cta_shape[1] * cta_shape[2] // 8 + \ + cta_shape[0] * (cta_shape[2] // 2) // elements_per_8b_md + else: + # Few BLAS3 operations only have A tensor + smem_per_stage = DataTypeSize[operation.A.element] * cta_shape[0] * cta_shape[2] // 8 + \ + DataTypeSize[operation.A.element] * cta_shape[1] * cta_shape[2] // 8 + + smem_usage = smem_per_stage * stages + return (smem_usage >> 10) + + +class GemmUniversalMode(enum.IntEnum): + """ + Types corresponding to GemmUniversalMode + """ + Gemm = 0 + GemmSplitKParallel = 1 + Batched = 2 + Array = 3 + + +class SplitKMode(enum.IntEnum): + """ + Types corresponding to SplitKMode + """ + NoneSplitK = 0 + Serial = 1 + Parallel = 2 diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/manifest.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/manifest.py new file mode 100644 index 0000000000000000000000000000000000000000..07427d6a88c27e30050a4344fe9eaa9e3be2b56b --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/manifest.py @@ -0,0 +1,683 @@ +################################################################################################# +# +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
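# --- Editor's illustrative annotation (not part of the patch) -----------------------------------
# Worked example for CalculateSmemUsage() defined in library.py above, taking the dense
# (non-sparse) branch for an f64 operation with threadblock_shape [128, 128, 16] and 3 stages.
# Note that this branch counts the A element type for both operand tiles:
#
#   per stage: 64 * 128 * 16 / 8  +  64 * 128 * 16 / 8  =  16384 + 16384  =  32768 bytes
#   3 stages:  3 * 32768 = 98304 bytes,  returned as 98304 >> 10 = 96 (KiB)
#
# Manifest.filter() further below compares this value against SharedMemPerCC when deciding
# whether a tile fits the targeted compute capability.
# -------------------------------------------------------------------------------------------------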
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################################# + +""" +Utilities for filtering CUTLASS library kernels and emitting library intitialization +and building code +""" + +import enum +import os.path +import shutil + +from cutlass_library.library import * +from cutlass_library.gemm_operation import * +from cutlass_library.rank_k_operation import * +from cutlass_library.rank_2k_operation import * +from cutlass_library.trmm_operation import * +from cutlass_library.symm_operation import * +from cutlass_library.conv2d_operation import * +from cutlass_library.conv3d_operation import * +import logging + +################################################################################################### +_LOGGER = logging.getLogger(__name__) + + +class EmitOperationKindAll: + def __init__(self, generated_path, kind, args): + self.generated_path = generated_path + self.kind = kind + self.args = args + + self.header_template =""" +/* + Generated by manifest.py - Do not edit. 
+*/ + +#include "cutlass/cutlass.h" +#include "cutlass/library/library.h" +#include "cutlass/library/manifest.h" + +namespace cutlass { +namespace library { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +""" + + self.entry_template = """ + +// +// Entry point to construct operations +// +void initialize_all_${operation_name}_operations(Manifest &manifest) { +""" + self.configuration_prototype_template = "void initialize_${configuration_name}(Manifest &manifest);\n" + self.configuration_template =" initialize_${configuration_name}(manifest);\n" + + self.epilogue_template ="""} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +""" + + # + def __enter__(self): + self.operation_path = os.path.join(self.generated_path, OperationKindNames[self.kind]) + os.makedirs(self.operation_path, exist_ok=True) + + self.top_level_path = os.path.join(self.operation_path, f"all_{OperationKindNames[self.kind]}_operations.cu") + + self.top_level_file = open(self.top_level_path, "w") + self.top_level_file.write(self.header_template) + + self.source_files = [self.top_level_path,] + + self.configurations = [] + + return self + + # + def emit(self, operations): + for min_cc, configurations in sorted(operations.items()): + for configuration_name, _ in configurations.items(): + self.configurations.append(configuration_name) + self.top_level_file.write(SubstituteTemplate(self.configuration_prototype_template, {'configuration_name': configuration_name} )) + + # + def __exit__(self, exception_type, exception_value, traceback): + self.top_level_file.write(SubstituteTemplate(self.entry_template, {'operation_name': OperationKindNames[self.kind]})) + + for configuration_name in self.configurations: + self.top_level_file.write(SubstituteTemplate(self.configuration_template, {'configuration_name': configuration_name})) + + self.top_level_file.write(self.epilogue_template) + self.top_level_file.close() + + +class EmitOperationKindLibrary: + def __init__(self, generated_path, min_cc, kind, args): + self.generated_path = generated_path + self.min_cc = min_cc + self.kind = kind + self.args = args + self.emitters = { + OperationKind.Gemm: EmitGemmConfigurationLibrary, + OperationKind.Conv2d: EmitConv2dConfigurationLibrary, + OperationKind.Conv3d: EmitConv3dConfigurationLibrary, + OperationKind.RankK: EmitRankKConfigurationLibrary, + OperationKind.Rank2K: EmitRank2KConfigurationLibrary, + OperationKind.Trmm: EmitTrmmConfigurationLibrary, + OperationKind.Symm: EmitSymmConfigurationLibrary + } + + self.header_template =""" +/* + Generated by manifest.py - Do not edit. 
+*/ + +#include "cutlass/cutlass.h" +#include "cutlass/library/library.h" +#include "cutlass/library/manifest.h" + +namespace cutlass { +namespace library { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +""" + self.entry_template = """ + +// +// Entry point to construct operations +// +void initialize_all_sm${min_cc}_${subclass_name}_${operation_name}_operations(Manifest &manifest) { +""" + self.configuration_prototype_template = "void initialize_${configuration_name}(Manifest &manifest);\n" + self.configuration_template = " initialize_${configuration_name}(manifest);\n" + self.subclass_call_template = " initialize_all_sm${min_cc}_${subclass_name}_${operation_name}_operations(manifest);\n" + + self.epilogue_template ="""} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +""" + + # + def __enter__(self): + self.operation_path = os.path.join(self.generated_path, OperationKindNames[self.kind], str(self.min_cc)) + os.makedirs(self.operation_path) + + self.top_level_path = os.path.join(self.operation_path, f"all_sm{self.min_cc}_{OperationKindNames[self.kind]}_operations.cu") + + self.top_level_file = open(self.top_level_path, "w") + self.top_level_file.write(self.header_template) + + self.source_files = {} + + # Each {operation_kind x cc} combination is further decomposed by the instruction + # types used. This dictionary used to track the file handles for the top-level + # files of each subclass + self.subclass_files = {} + + # Configurations in each sub class + self.subclass_configurations = {} + + return self + + # + def emit(self, configuration_name, operations): + assert len(operations) > 0 + + # The extended name for all operations of a given configuration_name is guaranteed + # to be the same because extended_name() is used in defining configuration_name. Thus, + # we can safely use the extended_name() of the first operation. 
+ extended_name = operations[0].extended_name() + + # Create a directory for operations with this subclass if it does not exist + if extended_name not in self.subclass_files: + subclass_path = os.path.join(self.operation_path, extended_name) + os.mkdir(subclass_path) + + self.subclass_configurations[extended_name] = [] + + # Open a new top-level file for this sub class + subclass_top_level_path = os.path.join( + subclass_path, f"all_sm{self.min_cc}_{extended_name}_{OperationKindNames[self.kind]}_operations.cu") + self.subclass_files[extended_name] = open(subclass_top_level_path, "w") + self.subclass_files[extended_name].write(self.header_template) + + self.source_files[extended_name] = [subclass_top_level_path] + + subclass_dir = os.path.dirname(self.subclass_files[extended_name].name) + with self.emitters[self.kind](subclass_dir, configuration_name) as configuration_emitter: + for operation in operations: + configuration_emitter.emit(operation) + + self.source_files[extended_name].append(configuration_emitter.configuration_path) + + self.subclass_configurations[extended_name].append(configuration_name) + self.subclass_files[extended_name].write(SubstituteTemplate(self.configuration_prototype_template, {'configuration_name': configuration_name} )) + + # + def __exit__(self, exception_type, exception_value, traceback): + + self.top_level_file.write( + SubstituteTemplate(self.entry_template, { + 'min_cc': str(self.min_cc), + 'subclass_name': '', + 'operation_name': OperationKindNames[self.kind] + })) + + # Finish and close all subclass files + for subclass_name, subclass_file in sorted(self.subclass_files.items()): + subclass_cfg = { + 'min_cc': str(self.min_cc), + 'subclass_name': subclass_name, + 'operation_name': OperationKindNames[self.kind] + } + subclass_file.write(SubstituteTemplate(self.entry_template, subclass_cfg)) + + for configuration in self.subclass_configurations[subclass_name]: + subclass_file.write( + SubstituteTemplate(self.configuration_template, { + 'configuration_name': configuration + })) + + subclass_file.write(self.epilogue_template) + subclass_file.close() + + # Write the call to initialize_all for this subclass to the top-level file + self.top_level_file.write(SubstituteTemplate(self.subclass_call_template, subclass_cfg)) + + self.top_level_file.write(self.epilogue_template) + self.top_level_file.close() + +class EmitInterfaceLibrary: + def __init__(self, generated_path, operation_count, args): + self.generated_path = generated_path + self.args = args + + self.prototypes = [] + self.fn_calls = [] + self.operation_count = str(operation_count) + + self.top_level_hdr_template = ''' +/* + Generated by manifest.py - Do not edit. 
+*/ +''' + self.top_level_prologue = ''' + +#include "cutlass/library/library.h" +#include "cutlass/library/manifest.h" + +namespace cutlass { +\tnamespace library { + +${prototypes} +''' + + self.top_level_initialize_kind = ''' +\t\tvoid initialize_all_${kind}_operations(Manifest &manifest) { +${fn_calls} +\t\t} +''' + + self.top_level_initialize = ''' +\t\tvoid initialize_all(Manifest &manifest) { +\t\t\tmanifest.reserve(${operation_count});\n +${fn_calls} +\t\t} +''' + + self.top_level_suffix = ''' +\t} // namespace library +} // namespace cutlass + +''' + + # + def __enter__(self): + self.top_level_path = os.path.join(self.generated_path, 'initialize_all.cpp') + + self.top_level_file = open(self.top_level_path, "w") + self.top_level_file.write(self.top_level_hdr_template) + + self.source_files = [self.top_level_path,] + + return self + + # + def emit(self, operation_name): + self.prototypes.append(SubstituteTemplate( + "\t\tvoid initialize_all_${operation_kind}_operations(Manifest &manifest);", + {'operation_kind': operation_name})) + + self.fn_calls.append(SubstituteTemplate( + "\t\t\tinitialize_all_${operation_kind}_operations(manifest);", + {'operation_kind': operation_name})) + + # + def __exit__(self, exception_type, exception_value, traceback): + self.top_level_file.write(SubstituteTemplate(self.top_level_prologue, {'prototypes':"\n".join(self.prototypes)})) + + # Write out initialize_all method + self.top_level_file.write(SubstituteTemplate(self.top_level_initialize, + {'operation_count': self.operation_count, 'fn_calls':"\n".join(self.fn_calls)})) + + self.top_level_file.write(self.top_level_suffix) + self.top_level_file.close() + +################################################################################################### +################################################################################################### + +class Options: + def __init__(self): + pass + +################################################################################################### + +# +class Manifest: + + # + def __init__(self, args = None): + self.operations = {} + self.args = args + self.operation_count = 0 + self.operations_by_name = {} + + self.kernel_filter = '' + self.kernel_filter_list = [] + self.kernel_names = [] + self.operations_enabled = [] + self.selected_kernels = [] + self.ignore_kernel_names = [] + self.compute_capabilities = [50,] + self.curr_build_dir = '.' 
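# --- Editor's illustrative annotation (not part of the patch) -----------------------------------
# Sketch of the generated/initialize_all.cpp that EmitInterfaceLibrary assembles from the
# templates above, shown for a build where only GEMM operations were selected (whitespace
# simplified; illustrative rather than verbatim output):
#
#   #include "cutlass/library/library.h"
#   #include "cutlass/library/manifest.h"
#   namespace cutlass { namespace library {
#     void initialize_all_gemm_operations(Manifest &manifest);
#     void initialize_all(Manifest &manifest) {
#       manifest.reserve(<operation_count>);
#       initialize_all_gemm_operations(manifest);
#     }
#   } }
# -------------------------------------------------------------------------------------------------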
+ self.filter_by_cc = True + + if self.args: + self.kernel_filter = self.args.kernels + self.curr_build_dir = args.curr_build_dir + + architectures = args.architectures.split(';') if len(args.architectures) else ['50',] + architectures = [x if x != '90a' else '90' for x in architectures] + + self.compute_capabilities = [int(x) for x in architectures] + + if args.filter_by_cc in ['false', 'False', '0']: + self.filter_by_cc = False + + if args.operations == 'all': + self.operations_enabled = [] + else: + operations_list = [ + OperationKind.Gemm + , OperationKind.Conv2d + , OperationKind.Conv3d + , OperationKind.RankK + , OperationKind.Trmm + , OperationKind.Symm + ] + self.operations_enabled = [x for x in operations_list if OperationKindNames[x] in args.operations.split(',')] + + if args.kernels == 'all': + self.kernel_names = [] + else: + self.kernel_names = [x for x in args.kernels.split(',') if x != ''] + + self.ignore_kernel_names = [x for x in args.ignore_kernels.split(',') if x != ''] + + if args.kernel_filter_file is None: + self.kernel_filter_list = [] + else: + self.kernel_filter_list = self.get_kernel_filters(args.kernel_filter_file) + _LOGGER.info("Using {filter_count} kernel filters from {filter_file}".format( + filter_count = len(self.kernel_filter_list), + filter_file = args.kernel_filter_file)) + + self.operation_count = 0 + self.operations_by_name = {} + self.disable_full_archs_compilation = args.disable_full_archs_compilation + + + def get_kernel_filters (self, kernelListFile): + if os.path.isfile(kernelListFile): + with open(kernelListFile, 'r') as fileReader: + lines = [line.rstrip() for line in fileReader if not line.startswith("#")] + + lines = [re.compile(line) for line in lines if line] + return lines + else: + return [] + + # + def filter_out_kernels(self, kernel_name, kernel_filter_list): + + for kernel_filter_re in kernel_filter_list: + if kernel_filter_re.search(kernel_name) is not None: + return True + + return False + + + # + def _filter_string_matches(self, filter_string, haystack): + ''' Returns true if all substrings appear in the haystack in order''' + substrings = filter_string.split('*') + for sub in substrings: + idx = haystack.find(sub) + if idx < 0: + return False + haystack = haystack[idx + len(sub):] + return True + + # + def filter(self, operation): + ''' Filtering operations based on various criteria''' + + # filter based on compute capability + enabled = not (self.filter_by_cc) + + for cc in self.compute_capabilities: + if cc >= operation.tile_description.minimum_compute_capability and \ + cc <= operation.tile_description.maximum_compute_capability and \ + (cc not in SharedMemPerCC or SharedMemPerCC[cc] >= CalculateSmemUsage(operation)): + + enabled = True + break + + if not enabled: + return False + + if len(self.operations_enabled) and not operation.operation_kind in self.operations_enabled: + return False + + # eliminate duplicates + if operation.procedural_name() in self.operations_by_name.keys(): + return False + + # Filter based on list of valid substrings + if len(self.kernel_names): + name = operation.procedural_name() + enabled = False + + # compare against the include list + for name_substr in self.kernel_names: + if self._filter_string_matches(name_substr, name): + _LOGGER.debug("Kernel {kernel} included due to filter string '{filt}'.".format( + kernel = operation.procedural_name(), + filt = name_substr)) + enabled = True + break + + # compare against the exclude list + for name_substr in self.ignore_kernel_names: + if 
self._filter_string_matches(name_substr, name): + _LOGGER.debug("Kernel {kernel} ignored due to filter string '{filt}'.".format( + kernel = operation.procedural_name(), + filt = name_substr)) + enabled = False + break + + if len(self.kernel_filter_list) > 0: + if self.filter_out_kernels(operation.procedural_name(), self.kernel_filter_list): + _LOGGER.debug("Kernel {kernel} matched via kernel filter file.".format(kernel = operation.procedural_name())) + enabled = True + else: + _LOGGER.debug("Kernel {kernel} culled due to no match in kernel filter file.".format(kernel = operation.procedural_name())) + enabled = False + + + # TODO: filter based on compute data type + return enabled + # + + # + def append(self, operation): + ''' + Inserts the operation. + + operation_kind -> configuration_name -> [] + ''' + + if self.filter(operation): + + self.selected_kernels.append(operation.procedural_name()) + + self.operations_by_name[operation.procedural_name()] = operation + + # add the configuration + configuration_name = operation.configuration_name() + + # Split operations by minimum CC + min_cc = operation.arch + + if operation.operation_kind not in self.operations.keys(): + self.operations[operation.operation_kind] = {} + + if min_cc not in self.operations[operation.operation_kind]: + self.operations[operation.operation_kind][min_cc] = {} + + if configuration_name not in self.operations[operation.operation_kind][min_cc].keys(): + self.operations[operation.operation_kind][min_cc][configuration_name] = [] + + self.operations[operation.operation_kind][min_cc][configuration_name].append(operation) + self.operation_count += 1 + else: + _LOGGER.debug("Culled {} from manifest".format(operation.procedural_name())) + # + + def emit_manifest_cmake(self, manifest_path, top_level_path, source_files): + with open(manifest_path, "w") as manifest_file: + + target_text = SubstituteTemplate("""cutlass_target_sources(cutlass_library_objs PRIVATE + """, { }) + manifest_file.write(target_text + '\n\n') + manifest_file.write(" %s\n" % str(top_level_path.replace('\\', '/'))) + generated_path = os.path.join(self.curr_build_dir, 'generated') + for kind in self.operations.keys(): + kind_str = OperationKindNames[kind] + all_kind_file = os.path.join(generated_path, kind_str, f"all_{kind_str}_operations.cu").replace('\\', '/') + manifest_file.write(f" {all_kind_file}\n") + manifest_file.write(')\n\n') + + for kind in self.operations.keys(): + for min_cc in sorted(self.operations[kind].keys()): + for subclass in sorted(source_files[kind][min_cc].keys()): + target_text = SubstituteTemplate("""cutlass_add_cutlass_library( + SUFFIX ${kind}_sm${min_cc}_${subclass} +""", { 'min_cc': str(min_cc), 'kind': OperationKindNames[kind], 'subclass': subclass }) + manifest_file.write(target_text + '\n\n') + + for source_file in source_files[kind][min_cc][subclass]: + manifest_file.write(" %s\n" % str(source_file.replace('\\', '/'))) + + manifest_file.write(")\n") + + if self.disable_full_archs_compilation: + self.emit_disable_full_archs_compilation(manifest_file, source_files) + + def emit_disable_full_archs_compilation(manifest_file, source_files): + def for_hopper(name): + pass + + def for_ampere(name): + return "16816" in name or \ + "16832" in name or \ + "16864" in name or \ + ("1688" in name and "tf32" in name) + + def for_turing(name): + return ("1688" in name and "tf32" not in name) or \ + "8816" in name + + def for_volta(name): + return "884" in name + + def is_cpp(name): + return name.endswith(".cpp") + + def 
get_src_archs_str_given_requested_cuda_archs(archs, source_file): + intersected_archs = archs & set(self.compute_capabilities) + if intersected_archs == set(): + raise RuntimeError( + """ + Empty archs set for file {} after taking + the intersection of {} (global requested archs) and + {} (per file requested archs) + """.format(source_file, set(self.compute_capabilities), archs)) + else: + return " ".join(map(str, intersected_archs)) + + for min_cc in sorted(source_files.keys()): + for source_file in source_files[min_cc]: + if is_cpp(source_file): + continue # skip because source is cpp + elif for_ampere(source_file): + archs_str = get_src_archs_str_given_requested_cuda_archs({80, 87, 90}, source_file) + elif for_turing(source_file): + archs_str = get_src_archs_str_given_requested_cuda_archs({75}, source_file) + elif for_volta(source_file): + archs_str = get_src_archs_str_given_requested_cuda_archs({70, 72}, source_file) + else: + raise RuntimeError("Per file archs are not set {}, as there is no rule specified for this file pattern".format(source_file)) + + manifest_file.write("cutlass_apply_cuda_gencode_flags({} SM_ARCHS {})\n".format(str(source_file.replace('\\', '/')), archs_str)) + + # + def emit(self, target = GeneratorTarget.Library): + + operation_emitters = { + GeneratorTarget.Library: EmitOperationKindLibrary + } + + # Emitters for all operations that fall under a particular kind (e.g., GEMM, Conv2d) + kind_emitters = { + GeneratorTarget.Library: EmitOperationKindAll + } + + interface_emitters = { + GeneratorTarget.Library: EmitInterfaceLibrary + } + + generated_path = os.path.join(self.curr_build_dir, 'generated') + + # create generated/ + if os.path.exists(generated_path): + shutil.rmtree(generated_path) + + os.mkdir(generated_path) + + with interface_emitters[target](generated_path, self.operation_count, self.args) as iface_emitter: + top_level_path = iface_emitter.top_level_path + for operation_kind in self.operations.keys(): + iface_emitter.emit(OperationKindNames[operation_kind]) + + source_files = {} + for kind in self.operations.keys(): + source_files[kind] = {} + for min_cc in self.operations[kind].keys(): + source_files[kind][min_cc] = {} + + for operation_kind, ops in self.operations.items(): + for min_cc, configurations in sorted(ops.items()): + with operation_emitters[target](generated_path, min_cc, operation_kind, self.args) as operation_kind_emitter: + for configuration_name, operations in configurations.items(): + _LOGGER.info("Emitting {config} with {num_ops} operations.".format( + config = configuration_name, num_ops = len(operations))) + operation_kind_emitter.emit(configuration_name, operations) + + for subclass, files in operation_kind_emitter.source_files.items(): + if subclass not in source_files[operation_kind][min_cc]: + source_files[operation_kind][min_cc][subclass] = [] + source_files[operation_kind][min_cc][subclass].extend(operation_kind_emitter.source_files[subclass]) + + # Emit top level all_{gemm, conv2d, ...}_operations.cu files + with kind_emitters[target](generated_path, operation_kind, self.args) as operation_kind_emitter: + operation_kind_emitter.emit(ops) + + # write the manifest.cmake file containing paths from all targets + manifest_path = os.path.join(generated_path, "manifest.cmake") + + self.emit_manifest_cmake(manifest_path, top_level_path, source_files) + +################################################################################################### diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/rank_2k_operation.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/rank_2k_operation.py new file mode 100644 index 0000000000000000000000000000000000000000..4b3bab30a5dbe6362bc47da19affd2364e79a604 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/rank_2k_operation.py @@ -0,0 +1,428 @@ +################################################################################################# +# +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +################################################################################################# + +""" +Utilities for emitting Rank2K kernels +""" + +import enum +import os.path +import shutil +import functools +import operator + +from cutlass_library.library import * + + +################################################################################################### +# +# Data structure modeling a Rank K update operation +# +################################################################################################### + +# +class Rank2KOperation: + # + def __init__(self, rank_k_kind, arch, tile_description, A, C, element_epilogue, \ + epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity8, \ + blas_mode = BlasMode.symmetric): + + self.blas_mode = blas_mode + self.operation_kind = OperationKind.Rank2K + self.arch = arch + self.tile_description = tile_description + self.rank_k_kind = rank_k_kind + # tensor A and B have same data type and layout + self.A = A + self.B = A + self.C = C + self.element_epilogue = element_epilogue + self.epilogue_functor = epilogue_functor + self.swizzling_functor = swizzling_functor + + # + def is_complex(self): + complex_operators = [ + MathOperation.multiply_add_complex, + MathOperation.multiply_add_complex_gaussian, + MathOperation.multiply_add_complex_fast_f32 + ] + return self.tile_description.math_instruction.math_operation in complex_operators + return False + + # + def is_planar_complex(self): + return False + + # + def accumulator_type(self): + accum = self.tile_description.math_instruction.element_accumulator + + if self.is_complex(): + return get_complex_from_real(accum) + + return accum + + # + def short_math_name(self): + if self.tile_description.math_instruction.math_operation == MathOperation.multiply_add_complex_gaussian: + return "g%s" % ShortDataTypeNames[self.accumulator_type()] + return ShortDataTypeNames[self.accumulator_type()] + + + # + def core_name(self): + ''' The basic operation kind is prefixed with a letter indicating the accumulation type. ''' + + inst_shape = '' + inst_operation = '' + intermediate_type = '' + + math_operations_map = { + MathOperation.xor_popc: 'xor', + MathOperation.and_popc: 'and' + } + + if self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp or \ + self.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp: + + math_op = self.tile_description.math_instruction.math_operation + math_op_string = math_operations_map[math_op] if math_op in math_operations_map.keys() else '' + + inst_shape = "%d%d%d" % tuple(self.tile_description.math_instruction.instruction_shape) + inst_shape += math_op_string + + if self.tile_description.math_instruction.element_a != self.A.element and \ + self.tile_description.math_instruction.element_a != self.tile_description.math_instruction.element_accumulator: + intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a] + + operation_name = 'syr2k' if self.blas_mode == BlasMode.symmetric else 'her2k' + + return "%s%s%s%s" % (self.short_math_name(), inst_shape, intermediate_type, operation_name) + + # + def extended_name(self): + ''' Append data types if they differ from compute type. 
''' + if self.is_complex(): + extended_name = "${core_name}" + else: + if self.C.element != self.tile_description.math_instruction.element_accumulator and \ + self.A.element != self.tile_description.math_instruction.element_accumulator: + extended_name = "${element_c}_${core_name}_${element_a}" + elif self.C.element == self.tile_description.math_instruction.element_accumulator and \ + self.A.element != self.tile_description.math_instruction.element_accumulator: + extended_name = "${core_name}_${element_a}" + else: + extended_name = "${core_name}" + + extended_name = SubstituteTemplate(extended_name, { + 'element_a': DataTypeNames[self.A.element], + 'element_c': DataTypeNames[self.C.element], + 'core_name': self.core_name() + }) + + return extended_name + + # + def layout_name(self): + if self.is_complex() or self.is_planar_complex(): + return "%s" % ( + ShortComplexLayoutNames[(self.A.layout, self.A.complex_transform)] + ) + return "%s" % (ShortLayoutTypeNames[self.A.layout]) + + # + def fill_mode_name(self): + return "%s" % (ShortFillModeNames[self.C.fill_mode]) + + # + def procedural_name(self): + ''' The full procedural name indicates architecture, extended name, tile size, and layout. ''' + threadblock = self.tile_description.procedural_name() + + opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class] + + alignment = max([self.A.alignment, self.C.alignment]) + + return SubstituteTemplate( + "cutlass_${opcode_class}_${extended_name}_${threadblock}_${layout}_${fill_mode}_align${alignment}", + { + 'opcode_class': opcode_class_name, + 'extended_name': self.extended_name(), + 'threadblock': threadblock, + 'layout': self.layout_name(), + 'fill_mode': self.fill_mode_name(), + 'alignment': "%d" % self.A.alignment, + } + ) + + # + def configuration_name(self): + ''' The full procedural name indicates architecture, extended name, tile size, and layout. 
''' + return self.procedural_name() + +################################################################################################### +# +# Emits single instances of a CUTLASS device-wide operator +# +################################################################################################### + +# +class EmitRank2KUniversalInstance: + ''' Responsible for emitting a CUTLASS template definition''' + + def __init__(self): + self.rank_k_template = """ +// Rank K operator ${operation_name} +using Operation_${operation_name} = + typename cutlass::gemm::device::Rank2K< + ${element_a}, ${layout_a}, + ${element_b}, ${layout_b}, + ${element_c}, ${layout_c}, ${fill_mode}, + ${element_accumulator}, + ${opcode_class}, + ${arch}, + cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, + cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>, + cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, + ${epilogue_functor}< + ${element_c}, + ${epilogue_vector_length}, + ${element_accumulator}, + ${element_epilogue} + >, + ${swizzling_functor}, + ${stages}, + ${align_a}, + ${align_b}, + ${split_k_serial}, + ${math_operation} +>; +""" + self.rank_k_complex_template = """ +// Rank K operator ${operation_name} +using Operation_${operation_name} = + typename cutlass::gemm::device::Rank2K< + ${element_a}, ${layout_a}, + ${element_b}, ${layout_b}, + ${element_c}, ${layout_c}, ${fill_mode}, + ${element_accumulator}, + ${opcode_class}, + ${arch}, + cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, + cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>, + cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, + ${epilogue_functor}< + ${element_c}, + ${epilogue_vector_length}, + ${element_accumulator}, + ${element_epilogue} + >, + ${swizzling_functor}, + ${stages}, + ${align_a}, + ${align_b}, + ${split_k_serial}, + ${math_operation}, + ${transform_a}, + ${transform_b}, + ${blas_mode} +>; +""" + + def emit(self, operation): + + threadblock_shape = operation.tile_description.threadblock_shape + + warp_count = operation.tile_description.warp_count + warp_shape = [threadblock_shape[idx] // warp_count[idx] for idx in range(3)] + + epilogue_vector_length = int(min(operation.C.alignment * DataTypeSize[operation.C.element], 128) / DataTypeSize[operation.C.element]) + + values = { + 'operation_name': operation.procedural_name(), + 'element_a': DataTypeTag[operation.A.element], + 'layout_a': LayoutTag[operation.A.layout], + 'element_b': DataTypeTag[operation.B.element], + 'layout_b': LayoutTag[operation.B.layout], + 'element_c': DataTypeTag[operation.C.element], + 'layout_c': LayoutTag[operation.C.layout], + 'fill_mode': FillModeTag[operation.C.fill_mode], + 'element_accumulator': DataTypeTag[operation.accumulator_type()], + 'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class], + 'arch': "cutlass::arch::Sm%d" % operation.arch, + 'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]), + 'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]), + 'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]), + 'warp_shape_m': str(warp_shape[0]), + 'warp_shape_n': str(warp_shape[1]), + 'warp_shape_k': str(warp_shape[2]), + 'instruction_shape_m': 
str(operation.tile_description.math_instruction.instruction_shape[0]), + 'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]), + 'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]), + 'epilogue_vector_length': str(epilogue_vector_length), + 'element_epilogue': str(DataTypeTag[operation.element_epilogue]), + 'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor], + 'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor], + 'stages': str(operation.tile_description.stages), + 'align_a': str(operation.A.alignment), + 'align_b': str(operation.B.alignment), + 'split_k_serial': 'false', + 'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation], + 'transform_a': ComplexTransformTag[operation.A.complex_transform], + 'transform_b': ComplexTransformTag[operation.B.complex_transform], + 'blas_mode': BlasModeTag[operation.blas_mode] + } + + rank_k_template = self.rank_k_complex_template if operation.is_complex() else self.rank_k_template + + return SubstituteTemplate(rank_k_template, values) + +################################################################################################### + + +################################################################################################### +# +# Emitters functions for all targets +# +################################################################################################### + +class EmitRank2KConfigurationLibrary: + def __init__(self, operation_path, configuration_name): + self.configuration_name = configuration_name + self.configuration_path = os.path.join(operation_path, "%s.cu" % configuration_name).replace('\\', '/') + + self.instance_emitter = { + RankKKind.Universal: EmitRank2KUniversalInstance, + } + + self.rank_k_kind_wrappers = { + RankKKind.Universal: 'Rank2KOperation', + } + + self.instance_template = { + RankKKind.Universal: """ +${compile_guard_start} + manifest.append(new ${rank_k_kind}< + Operation_${operation_name} + >("${operation_name}")); +${compile_guard_end} +""" + } + + self.header_template = """ +/* + Generated by rank_2k_operation.py - Do not edit. 
+*/ + +/////////////////////////////////////////////////////////////////////////////////////////////////// +#include "cutlass/cutlass.h" +#include "cutlass/library/library.h" +#include "cutlass/library/manifest.h" + +#include "library_internal.h" +#include "rank_2k_operation.h" + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +""" + + self.initialize_function_template = """ + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +void initialize_${configuration_name}(Manifest &manifest) { + +""" + self.epilogue_template = """ + +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +""" + + def __enter__(self): + self.configuration_file = open(self.configuration_path, "w") + self.configuration_file.write(self.header_template) + + self.instance_definitions = [] + self.instance_wrappers = [] + + self.operations = [] + return self + + def emit(self, operation): + emitter = self.instance_emitter[operation.rank_k_kind]() + + self.operations.append(operation) + + self.instance_definitions.append(emitter.emit(operation)) + + self.instance_wrappers.append(SubstituteTemplate(self.instance_template[operation.rank_k_kind], { + 'configuration_name': self.configuration_name, + 'operation_name': operation.procedural_name(), + 'rank_k_kind': self.rank_k_kind_wrappers[operation.rank_k_kind], + 'compile_guard_start': SubstituteTemplate(self.wmma_guard_start, {'sm_number': str(operation.arch)}) \ + if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else "", + 'compile_guard_end': "#endif" \ + if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else "" + })) + + def __exit__(self, exception_type, exception_value, traceback): + + # Write instance definitions in top-level namespace + for instance_definition in self.instance_definitions: + self.configuration_file.write(instance_definition) + + # Add wrapper objects within initialize() function + self.configuration_file.write(SubstituteTemplate(self.initialize_function_template, { + 'configuration_name': self.configuration_name + })) + + for instance_wrapper in self.instance_wrappers: + self.configuration_file.write(instance_wrapper) + + self.configuration_file.write(self.epilogue_template) + self.configuration_file.close() + +################################################################################################### diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/rank_k_operation.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/rank_k_operation.py new file mode 100644 index 0000000000000000000000000000000000000000..993df7ca1eac1945da4f6857f8bcee4b2ce475b7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/rank_k_operation.py @@ -0,0 +1,417 @@ +################################################################################################# +# +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################################# + +""" +Utilities for emitting RankK kernels +""" + +import enum +import os.path +import shutil +import functools +import operator + +from cutlass_library.library import * + + +################################################################################################### +# +# Data structure modeling a Rank K update operation +# +################################################################################################### + +# +class RankKOperation: + # + def __init__(self, rank_k_kind, arch, tile_description, A, C, element_epilogue, \ + epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity8, \ + blas_mode = BlasMode.symmetric): + + self.blas_mode = blas_mode + self.operation_kind = OperationKind.RankK + self.arch = arch + self.tile_description = tile_description + self.rank_k_kind = rank_k_kind + self.A = A + self.C = C + self.element_epilogue = element_epilogue + self.epilogue_functor = epilogue_functor + self.swizzling_functor = swizzling_functor + + # + def is_complex(self): + complex_operators = [ + MathOperation.multiply_add_complex, + MathOperation.multiply_add_complex_gaussian, + MathOperation.multiply_add_complex_fast_f32 + ] + return self.tile_description.math_instruction.math_operation in complex_operators + return False + + # + def is_planar_complex(self): + return False + + # + def accumulator_type(self): + accum = self.tile_description.math_instruction.element_accumulator + + if self.is_complex(): + return get_complex_from_real(accum) + + return accum + + # + def short_math_name(self): + if self.tile_description.math_instruction.math_operation == MathOperation.multiply_add_complex_gaussian: + return "g%s" % ShortDataTypeNames[self.accumulator_type()] + return ShortDataTypeNames[self.accumulator_type()] + + + # + def core_name(self): + ''' The basic operation kind is prefixed with a letter indicating the 
accumulation type. ''' + + inst_shape = '' + inst_operation = '' + intermediate_type = '' + + math_operations_map = { + MathOperation.xor_popc: 'xor', + MathOperation.and_popc: 'and' + } + + if self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp or \ + self.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp: + + math_op = self.tile_description.math_instruction.math_operation + math_op_string = math_operations_map[math_op] if math_op in math_operations_map.keys() else '' + + inst_shape = "%d%d%d" % tuple(self.tile_description.math_instruction.instruction_shape) + inst_shape += math_op_string + + if self.tile_description.math_instruction.element_a != self.A.element and \ + self.tile_description.math_instruction.element_a != self.tile_description.math_instruction.element_accumulator: + intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a] + + operation_name = 'syrk' if self.blas_mode == BlasMode.symmetric else 'herk' + + return "%s%s%s%s" % (self.short_math_name(), inst_shape, intermediate_type, operation_name) + + # + def extended_name(self): + ''' Append data types if they differ from compute type. ''' + if self.is_complex(): + extended_name = "${core_name}" + else: + if self.C.element != self.tile_description.math_instruction.element_accumulator and \ + self.A.element != self.tile_description.math_instruction.element_accumulator: + extended_name = "${element_c}_${core_name}_${element_a}" + elif self.C.element == self.tile_description.math_instruction.element_accumulator and \ + self.A.element != self.tile_description.math_instruction.element_accumulator: + extended_name = "${core_name}_${element_a}" + else: + extended_name = "${core_name}" + + extended_name = SubstituteTemplate(extended_name, { + 'element_a': DataTypeNames[self.A.element], + 'element_c': DataTypeNames[self.C.element], + 'core_name': self.core_name() + }) + + return extended_name + + # + def layout_name(self): + if self.is_complex() or self.is_planar_complex(): + return "%s" % ( + ShortComplexLayoutNames[(self.A.layout, self.A.complex_transform)] + ) + return "%s" % (ShortLayoutTypeNames[self.A.layout]) + + # + def fill_mode_name(self): + return "%s" % (ShortFillModeNames[self.C.fill_mode]) + + # + def procedural_name(self): + ''' The full procedural name indicates architecture, extended name, tile size, and layout. ''' + threadblock = self.tile_description.procedural_name() + + opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class] + + alignment = max([self.A.alignment, self.C.alignment]) + + return SubstituteTemplate( + "cutlass_${opcode_class}_${extended_name}_${threadblock}_${layout}_${fill_mode}_align${alignment}", + { + 'opcode_class': opcode_class_name, + 'extended_name': self.extended_name(), + 'threadblock': threadblock, + 'layout': self.layout_name(), + 'fill_mode': self.fill_mode_name(), + 'alignment': "%d" % self.A.alignment, + } + ) + + # + def configuration_name(self): + ''' The full procedural name indicates architecture, extended name, tile size, and layout. 
''' + return self.procedural_name() + +################################################################################################### +# +# Emits single instances of a CUTLASS device-wide operator +# +################################################################################################### + +# +class EmitRankKUniversalInstance: + ''' Responsible for emitting a CUTLASS template definition''' + + def __init__(self): + self.rank_k_template = """ +// Rank K operator ${operation_name} +using Operation_${operation_name} = + typename cutlass::gemm::device::RankK< + ${element_a}, ${layout_a}, + ${element_c}, ${layout_c}, ${fill_mode}, + ${element_accumulator}, + ${opcode_class}, + ${arch}, + cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, + cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>, + cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, + ${epilogue_functor}< + ${element_c}, + ${epilogue_vector_length}, + ${element_accumulator}, + ${element_epilogue} + >, + ${swizzling_functor}, + ${stages}, + ${align_a}, + ${split_k_serial}, + ${math_operation} +>; +""" + self.rank_k_complex_template = """ +// Rank K operator ${operation_name} +using Operation_${operation_name} = + typename cutlass::gemm::device::RankK< + ${element_a}, ${layout_a}, + ${element_c}, ${layout_c}, ${fill_mode}, + ${element_accumulator}, + ${opcode_class}, + ${arch}, + cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, + cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>, + cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, + ${epilogue_functor}< + ${element_c}, + ${epilogue_vector_length}, + ${element_accumulator}, + ${element_epilogue} + >, + ${swizzling_functor}, + ${stages}, + ${align_a}, + ${split_k_serial}, + ${math_operation}, + ${transform_a}, + ${blas_mode} +>; +""" + + def emit(self, operation): + + threadblock_shape = operation.tile_description.threadblock_shape + + warp_count = operation.tile_description.warp_count + warp_shape = [threadblock_shape[idx] // warp_count[idx] for idx in range(3)] + + epilogue_vector_length = int(min(operation.C.alignment * DataTypeSize[operation.C.element], 128) / DataTypeSize[operation.C.element]) + + values = { + 'operation_name': operation.procedural_name(), + 'element_a': DataTypeTag[operation.A.element], + 'layout_a': LayoutTag[operation.A.layout], + 'element_c': DataTypeTag[operation.C.element], + 'layout_c': LayoutTag[operation.C.layout], + 'fill_mode': FillModeTag[operation.C.fill_mode], + 'element_accumulator': DataTypeTag[operation.accumulator_type()], + 'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class], + 'arch': "cutlass::arch::Sm%d" % operation.arch, + 'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]), + 'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]), + 'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]), + 'warp_shape_m': str(warp_shape[0]), + 'warp_shape_n': str(warp_shape[1]), + 'warp_shape_k': str(warp_shape[2]), + 'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]), + 'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]), + 'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]), + 
'epilogue_vector_length': str(epilogue_vector_length), + 'element_epilogue': str(DataTypeTag[operation.element_epilogue]), + 'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor], + 'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor], + 'stages': str(operation.tile_description.stages), + 'align_a': str(operation.A.alignment), + 'split_k_serial': 'false', + 'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation], + 'transform_a': ComplexTransformTag[operation.A.complex_transform], + 'blas_mode': BlasModeTag[operation.blas_mode] + } + + rank_k_template = self.rank_k_complex_template if operation.is_complex() else self.rank_k_template + + return SubstituteTemplate(rank_k_template, values) + +################################################################################################### + + +################################################################################################### +# +# Emitters functions for all targets +# +################################################################################################### + +class EmitRankKConfigurationLibrary: + def __init__(self, operation_path, configuration_name): + self.configuration_name = configuration_name + self.configuration_path = os.path.join(operation_path, "%s.cu" % configuration_name).replace('\\', '/') + + self.instance_emitter = { + RankKKind.Universal: EmitRankKUniversalInstance, + } + + self.rank_k_kind_wrappers = { + RankKKind.Universal: 'RankKOperation', + } + + self.instance_template = { + RankKKind.Universal: """ +${compile_guard_start} + manifest.append(new ${rank_k_kind}< + Operation_${operation_name} + >("${operation_name}")); +${compile_guard_end} +""" + } + + self.header_template = """ +/* + Generated by rank_k_operation.py - Do not edit. 
+*/ + +/////////////////////////////////////////////////////////////////////////////////////////////////// +#include "cutlass/cutlass.h" +#include "cutlass/library/library.h" +#include "cutlass/library/manifest.h" + +#include "library_internal.h" +#include "rank_k_operation.h" + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +""" + + self.initialize_function_template = """ + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +void initialize_${configuration_name}(Manifest &manifest) { + +""" + self.epilogue_template = """ + +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +""" + + def __enter__(self): + self.configuration_file = open(self.configuration_path, "w") + self.configuration_file.write(self.header_template) + + self.instance_definitions = [] + self.instance_wrappers = [] + + self.operations = [] + return self + + def emit(self, operation): + emitter = self.instance_emitter[operation.rank_k_kind]() + + self.operations.append(operation) + + self.instance_definitions.append(emitter.emit(operation)) + + self.instance_wrappers.append(SubstituteTemplate(self.instance_template[operation.rank_k_kind], { + 'configuration_name': self.configuration_name, + 'operation_name': operation.procedural_name(), + 'rank_k_kind': self.rank_k_kind_wrappers[operation.rank_k_kind], + 'compile_guard_start': SubstituteTemplate(self.wmma_guard_start, {'sm_number': str(operation.arch)}) \ + if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else "", + 'compile_guard_end': "#endif" \ + if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else "" + })) + + def __exit__(self, exception_type, exception_value, traceback): + + # Write instance definitions in top-level namespace + for instance_definition in self.instance_definitions: + self.configuration_file.write(instance_definition) + + # Add wrapper objects within initialize() function + self.configuration_file.write(SubstituteTemplate(self.initialize_function_template, { + 'configuration_name': self.configuration_name + })) + + for instance_wrapper in self.instance_wrappers: + self.configuration_file.write(instance_wrapper) + + self.configuration_file.write(self.epilogue_template) + self.configuration_file.close() + +################################################################################################### diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/symm_operation.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/symm_operation.py new file mode 100644 index 0000000000000000000000000000000000000000..5b2a1603886b75a959fc98ba206ce991db00ee5a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/symm_operation.py @@ -0,0 +1,430 @@ +################################################################################################# +# +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################################# + +""" +Utilities for emitting Symm kernels +""" + +import enum +import os.path +import shutil +import functools +import operator + +from cutlass_library.library import * + + +################################################################################################### +# +# Data structure modeling a Symm update operation +# +################################################################################################### + +# +class SymmOperation: + # + def __init__(self, symm_kind, arch, tile_description, A, B, C, element_epilogue, \ + epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity8, \ + blas_mode = BlasMode.symmetric): + + self.blas_mode = blas_mode + self.operation_kind = OperationKind.Symm + self.arch = arch + self.tile_description = tile_description + self.symm_kind = symm_kind + # tensor A and B have same data type and layout + self.A = A + self.B = B + self.C = C + self.element_epilogue = element_epilogue + self.epilogue_functor = epilogue_functor + self.swizzling_functor = swizzling_functor + + # + def is_complex(self): + complex_operators = [ + MathOperation.multiply_add_complex, + MathOperation.multiply_add_complex_gaussian, + MathOperation.multiply_add_complex_fast_f32 + ] + return self.tile_description.math_instruction.math_operation in complex_operators + return False + + # + def is_planar_complex(self): + return False + + # + def accumulator_type(self): + accum = self.tile_description.math_instruction.element_accumulator + + if self.is_complex(): + return get_complex_from_real(accum) + + return accum + + # + def short_math_name(self): + if self.tile_description.math_instruction.math_operation == MathOperation.multiply_add_complex_gaussian: + return "g%s" % ShortDataTypeNames[self.accumulator_type()] + return ShortDataTypeNames[self.accumulator_type()] + + + # + def core_name(self): + ''' The basic 
operation kind is prefixed with a letter indicating the accumulation type. ''' + + inst_shape = '' + inst_operation = '' + intermediate_type = '' + + math_operations_map = { + MathOperation.xor_popc: 'xor', + MathOperation.and_popc: 'and' + } + + if self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp or \ + self.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp: + + math_op = self.tile_description.math_instruction.math_operation + math_op_string = math_operations_map[math_op] if math_op in math_operations_map.keys() else '' + + inst_shape = "%d%d%d" % tuple(self.tile_description.math_instruction.instruction_shape) + inst_shape += math_op_string + + if self.tile_description.math_instruction.element_a != self.A.element and \ + self.tile_description.math_instruction.element_a != self.tile_description.math_instruction.element_accumulator: + intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a] + + operation_name = 'symm' if self.blas_mode == BlasMode.symmetric else 'hemm' + + return "%s%s%s%s" % (self.short_math_name(), inst_shape, intermediate_type, operation_name) + + # + def extended_name(self): + ''' Append data types if they differ from compute type. ''' + if self.is_complex(): + extended_name = "${core_name}" + else: + if self.C.element != self.tile_description.math_instruction.element_accumulator and \ + self.A.element != self.tile_description.math_instruction.element_accumulator: + extended_name = "${element_c}_${core_name}_${element_a}" + elif self.C.element == self.tile_description.math_instruction.element_accumulator and \ + self.A.element != self.tile_description.math_instruction.element_accumulator: + extended_name = "${core_name}_${element_a}" + else: + extended_name = "${core_name}" + + extended_name = SubstituteTemplate(extended_name, { + 'element_a': DataTypeNames[self.A.element], + 'element_c': DataTypeNames[self.C.element], + 'core_name': self.core_name() + }) + + return extended_name + + # + def layout_name(self): + if self.is_complex() or self.is_planar_complex(): + return "%s" % ( + ShortComplexLayoutNames[(self.A.layout, self.A.complex_transform)] + ) + return "%s" % (ShortLayoutTypeNames[self.A.layout]) + + # + def side_mode_name(self): + return "%s" % (ShortSideModeNames[self.A.side_mode]) + + # + def fill_mode_name(self): + return "%s" % (ShortFillModeNames[self.A.fill_mode]) + + # + def procedural_name(self): + ''' The full procedural name indicates architecture, extended name, tile size, and layout. ''' + threadblock = self.tile_description.procedural_name() + + opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class] + + alignment = self.C.alignment + + return SubstituteTemplate( + "cutlass_${opcode_class}_${extended_name}_${threadblock}_${layout}_${side_mode}_${fill_mode}_align${alignment}", + { + 'opcode_class': opcode_class_name, + 'extended_name': self.extended_name(), + 'threadblock': threadblock, + 'layout': self.layout_name(), + 'side_mode': self.side_mode_name(), + 'fill_mode': self.fill_mode_name(), + 'alignment': "%d" % alignment, + } + ) + + # + def configuration_name(self): + ''' The full procedural name indicates architecture, extended name, tile size, and layout. 
''' + return self.procedural_name() + +################################################################################################### +# +# Emits single instances of a CUTLASS device-wide operator +# +################################################################################################### + +# +class EmitSymmUniversalInstance: + ''' Responsible for emitting a CUTLASS template definition''' + + def __init__(self): + self.symm_template = """ +// Symm operator ${operation_name} +using Operation_${operation_name} = + typename cutlass::gemm::device::Symm< + ${element_a}, ${layout_a}, ${side_mode}, ${fill_mode}, + ${element_b}, ${layout_b}, + ${element_c}, ${layout_c}, + ${element_accumulator}, + ${opcode_class}, + ${arch}, + cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, + cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>, + cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, + ${epilogue_functor}< + ${element_c}, + ${epilogue_vector_length}, + ${element_accumulator}, + ${element_epilogue} + >, + ${swizzling_functor}, + ${stages}, + ${align_a}, + ${align_b}, + ${split_k_serial}, + ${math_operation} +>; +""" + self.symm_complex_template = """ +// Symm operator ${operation_name} +using Operation_${operation_name} = + typename cutlass::gemm::device::Symm< + ${element_a}, ${layout_a}, ${side_mode}, ${fill_mode}, + ${element_b}, ${layout_b}, + ${element_c}, ${layout_c}, + ${element_accumulator}, + ${opcode_class}, + ${arch}, + cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, + cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>, + cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, + ${epilogue_functor}< + ${element_c}, + ${epilogue_vector_length}, + ${element_accumulator}, + ${element_epilogue} + >, + ${swizzling_functor}, + ${stages}, + ${align_a}, + ${align_b}, + ${split_k_serial}, + ${math_operation}, + ${blas_mode} +>; +""" + + def emit(self, operation): + + threadblock_shape = operation.tile_description.threadblock_shape + + warp_count = operation.tile_description.warp_count + warp_shape = [threadblock_shape[idx] // warp_count[idx] for idx in range(3)] + + epilogue_vector_length = int(min(operation.C.alignment * DataTypeSize[operation.C.element], 128) / DataTypeSize[operation.C.element]) + + values = { + 'operation_name': operation.procedural_name(), + 'element_a': DataTypeTag[operation.A.element], + 'layout_a': LayoutTag[operation.A.layout], + 'side_mode': SideModeTag[operation.A.side_mode], + 'fill_mode': FillModeTag[operation.A.fill_mode], + 'element_b': DataTypeTag[operation.B.element], + 'layout_b': LayoutTag[operation.B.layout], + 'element_c': DataTypeTag[operation.C.element], + 'layout_c': LayoutTag[operation.C.layout], + 'element_accumulator': DataTypeTag[operation.accumulator_type()], + 'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class], + 'arch': "cutlass::arch::Sm%d" % operation.arch, + 'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]), + 'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]), + 'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]), + 'warp_shape_m': str(warp_shape[0]), + 'warp_shape_n': str(warp_shape[1]), + 'warp_shape_k': str(warp_shape[2]), + 'instruction_shape_m': 
str(operation.tile_description.math_instruction.instruction_shape[0]), + 'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]), + 'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]), + 'epilogue_vector_length': str(epilogue_vector_length), + 'element_epilogue': str(DataTypeTag[operation.element_epilogue]), + 'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor], + 'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor], + 'stages': str(operation.tile_description.stages), + 'align_a': str(operation.A.alignment), + 'align_b': str(operation.B.alignment), + 'split_k_serial': 'false', + 'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation], + 'blas_mode': BlasModeTag[operation.blas_mode] + } + + symm_template = self.symm_complex_template if operation.is_complex() else self.symm_template + + return SubstituteTemplate(symm_template, values) + +################################################################################################### + + +################################################################################################### +# +# Emitters functions for all targets +# +################################################################################################### + +class EmitSymmConfigurationLibrary: + def __init__(self, operation_path, configuration_name): + self.configuration_name = configuration_name + self.configuration_path = os.path.join(operation_path, "%s.cu" % configuration_name).replace('\\', '/') + + self.instance_emitter = { + SymmKind.Universal: EmitSymmUniversalInstance, + } + + self.symm_kind_wrappers = { + SymmKind.Universal: 'SymmOperation', + } + + self.instance_template = { + SymmKind.Universal: """ +${compile_guard_start} + manifest.append(new ${symm_kind}< + Operation_${operation_name} + >("${operation_name}")); +${compile_guard_end} +""" + } + + self.header_template = """ +/* + Generated by symm_operation.py - Do not edit. 
+*/ + +/////////////////////////////////////////////////////////////////////////////////////////////////// +#include "cutlass/cutlass.h" +#include "cutlass/library/library.h" +#include "cutlass/library/manifest.h" + +#include "library_internal.h" +#include "symm_operation.h" + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +""" + + self.initialize_function_template = """ + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +void initialize_${configuration_name}(Manifest &manifest) { + +""" + self.epilogue_template = """ + +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +""" + + def __enter__(self): + self.configuration_file = open(self.configuration_path, "w") + self.configuration_file.write(self.header_template) + + self.instance_definitions = [] + self.instance_wrappers = [] + + self.operations = [] + return self + + def emit(self, operation): + emitter = self.instance_emitter[operation.symm_kind]() + + self.operations.append(operation) + + self.instance_definitions.append(emitter.emit(operation)) + + self.instance_wrappers.append(SubstituteTemplate(self.instance_template[operation.symm_kind], { + 'configuration_name': self.configuration_name, + 'operation_name': operation.procedural_name(), + 'symm_kind': self.symm_kind_wrappers[operation.symm_kind], + 'compile_guard_start': SubstituteTemplate(self.wmma_guard_start, {'sm_number': str(operation.arch)}) \ + if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else "", + 'compile_guard_end': "#endif" \ + if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else "" + })) + + def __exit__(self, exception_type, exception_value, traceback): + + # Write instance definitions in top-level namespace + for instance_definition in self.instance_definitions: + self.configuration_file.write(instance_definition) + + # Add wrapper objects within initialize() function + self.configuration_file.write(SubstituteTemplate(self.initialize_function_template, { + 'configuration_name': self.configuration_name + })) + + for instance_wrapper in self.instance_wrappers: + self.configuration_file.write(instance_wrapper) + + self.configuration_file.write(self.epilogue_template) + self.configuration_file.close() + +################################################################################################### diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/trmm_operation.py b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/trmm_operation.py new file mode 100644 index 0000000000000000000000000000000000000000..b2b0577fd7df31866399c6c098c03cafc3c12965 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/cutlass_library/trmm_operation.py @@ -0,0 +1,437 @@ +################################################################################################# +# +# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################################# + +""" +Utilities for emitting Trmm kernels +""" + +import enum +import os.path +import shutil +import functools +import operator + +from cutlass_library.library import * + + +################################################################################################### +# +# Data structure modeling a TRMM operation +# +################################################################################################### + +# +class TrmmOperation: + # + def __init__(self, trmm_kind, arch, tile_description, A, B, C, element_epilogue, \ + epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity8): + + self.operation_kind = OperationKind.Trmm + self.arch = arch + self.tile_description = tile_description + self.trmm_kind = trmm_kind + self.A = A + self.B = B + self.C = C + self.element_epilogue = element_epilogue + self.epilogue_functor = epilogue_functor + self.swizzling_functor = swizzling_functor + + # + def is_complex(self): + complex_operators = [ + MathOperation.multiply_add_complex, + MathOperation.multiply_add_complex_gaussian, + MathOperation.multiply_add_complex_fast_f32 + ] + return self.tile_description.math_instruction.math_operation in complex_operators + return False + + # + def is_planar_complex(self): +# return self.trmm_kind in (TrmmKind.PlanarComplex, TrmmKind.PlanarComplexArray) + return False + + # + def accumulator_type(self): + accum = self.tile_description.math_instruction.element_accumulator + + if self.is_complex(): + return get_complex_from_real(accum) + + return accum + + # + def short_math_name(self): + if self.tile_description.math_instruction.math_operation == MathOperation.multiply_add_complex_gaussian: + return "g%s" % ShortDataTypeNames[self.accumulator_type()] + return ShortDataTypeNames[self.accumulator_type()] + + + # + def core_name(self): + ''' The basic operation kind is prefixed with a letter 
indicating the accumulation type. ''' + + inst_shape = '' + inst_operation = '' + intermediate_type = '' + + math_operations_map = { + MathOperation.xor_popc: 'xor', + MathOperation.and_popc: 'and' + } + + if self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp or \ + self.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp: + + math_op = self.tile_description.math_instruction.math_operation + math_op_string = math_operations_map[math_op] if math_op in math_operations_map.keys() else '' + + inst_shape = "%d%d%d" % tuple(self.tile_description.math_instruction.instruction_shape) + inst_shape += math_op_string + + if self.tile_description.math_instruction.element_a != self.A.element and \ + self.tile_description.math_instruction.element_a != self.tile_description.math_instruction.element_accumulator: + intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a] + + return "%s%s%s%s" % (self.short_math_name(), inst_shape, intermediate_type, TrmmKindNames[self.trmm_kind]) + + # + def extended_name(self): + ''' Append data types if they differ from compute type. ''' + if self.is_complex(): + extended_name = "${core_name}" + else: + if self.C.element != self.tile_description.math_instruction.element_accumulator and \ + self.A.element != self.tile_description.math_instruction.element_accumulator: + extended_name = "${element_c}_${core_name}_${element_a}" + elif self.C.element == self.tile_description.math_instruction.element_accumulator and \ + self.A.element != self.tile_description.math_instruction.element_accumulator: + extended_name = "${core_name}_${element_a}" + else: + extended_name = "${core_name}" + + extended_name = SubstituteTemplate(extended_name, { + 'element_a': DataTypeNames[self.A.element], + 'element_c': DataTypeNames[self.C.element], + 'core_name': self.core_name() + }) + + return extended_name + + # + def layout_name(self): + if self.is_complex() or self.is_planar_complex(): + return "%s%s" % ( + ShortComplexLayoutNames[(self.A.layout, self.A.complex_transform)], + ShortComplexLayoutNames[(self.B.layout, self.B.complex_transform)] + ) + return "%s%s" % (ShortLayoutTypeNames[self.A.layout], ShortLayoutTypeNames[self.B.layout]) + + # + def side_mode_name(self): + return "%s" % (ShortSideModeNames[self.A.side_mode]) + + # + def fill_mode_name(self): + return "%s" % (ShortFillModeNames[self.A.fill_mode]) + + # + def diag_type_name(self): + return "%s" % (ShortDiagTypeNames[self.A.diag_type]) + + # + def procedural_name(self): + ''' The full procedural name indicates architecture, extended name, tile size, and layout. ''' + threadblock = self.tile_description.procedural_name() + + opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class] + + alignment = max([self.C.alignment]) + + return SubstituteTemplate( + "cutlass_${opcode_class}_${extended_name}_${threadblock}_${layout}_${side_mode}_${fill_mode}_${diag_type}_align${alignment}", + { + 'opcode_class': opcode_class_name, + 'extended_name': self.extended_name(), + 'threadblock': threadblock, + 'layout': self.layout_name(), + 'side_mode': self.side_mode_name(), + 'fill_mode': self.fill_mode_name(), + 'diag_type': self.diag_type_name(), + 'alignment': "%d" % self.C.alignment, + } + ) + + # + def configuration_name(self): + ''' The full procedural name indicates architecture, extended name, tile size, and layout. 
''' + return self.procedural_name() + +################################################################################################### +# +# Emits single instances of a CUTLASS device-wide operator +# +################################################################################################### + +# +class EmitTrmmUniversalInstance: + ''' Responsible for emitting a CUTLASS template definition''' + + def __init__(self): + self.trmm_template = """ +// Trmm operator ${operation_name} +using Operation_${operation_name} = + typename cutlass::gemm::device::Trmm< + ${element_a}, ${layout_a}, + ${side_mode}, ${fill_mode}, ${diag_type}, + ${element_b}, ${layout_b}, + ${element_c}, ${layout_c}, + ${element_accumulator}, + ${opcode_class}, + ${arch}, + cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, + cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>, + cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, + ${epilogue_functor}< + ${element_c}, + ${epilogue_vector_length}, + ${element_accumulator}, + ${element_epilogue}, + cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling + >, + ${swizzling_functor}, + ${stages}, + ${align_a}, + ${align_b}, + ${split_k_serial}, + ${math_operation} +>; +""" + self.trmm_complex_template = """ +// Trmm operator ${operation_name} +using Operation_${operation_name} = + typename cutlass::gemm::device::Trmm< + ${element_a}, ${layout_a}, + ${side_mode}, ${fill_mode}, ${diag_type}, + ${element_b}, ${layout_b}, + ${element_c}, ${layout_c}, + ${element_accumulator}, + ${opcode_class}, + ${arch}, + cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, + cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>, + cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, + ${epilogue_functor}< + ${element_c}, + ${epilogue_vector_length}, + ${element_accumulator}, + ${element_epilogue}, + cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling + >, + ${swizzling_functor}, + ${stages}, + ${align_a}, + ${align_b}, + ${split_k_serial}, + ${math_operation}, + ${transform_a} +>; +""" + + def emit(self, operation): + + threadblock_shape = operation.tile_description.threadblock_shape + warp_count = operation.tile_description.warp_count + + warp_shape = [threadblock_shape[idx] // warp_count[idx] for idx in range(3)] + + epilogue_vector_length = int(min(operation.C.alignment * DataTypeSize[operation.C.element], 128) / DataTypeSize[operation.C.element]) + + values = { + 'operation_name': operation.procedural_name(), + 'element_a': DataTypeTag[operation.A.element], + 'layout_a': LayoutTag[operation.A.layout], + 'side_mode' : SideModeTag[operation.A.side_mode], + 'fill_mode': FillModeTag[operation.A.fill_mode], + 'diag_type' : DiagTypeTag[operation.A.diag_type], + 'element_b': DataTypeTag[operation.B.element], + 'layout_b': LayoutTag[operation.B.layout], + 'element_c': DataTypeTag[operation.C.element], + 'layout_c': LayoutTag[operation.C.layout], + 'element_accumulator': DataTypeTag[operation.accumulator_type()], + 'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class], + 'arch': "cutlass::arch::Sm%d" % operation.arch, + 'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]), + 'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]), + 'threadblock_shape_k': 
str(operation.tile_description.threadblock_shape[2]), + 'warp_shape_m': str(warp_shape[0]), + 'warp_shape_n': str(warp_shape[1]), + 'warp_shape_k': str(warp_shape[2]), + 'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]), + 'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]), + 'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]), + 'epilogue_vector_length': str(epilogue_vector_length), + 'element_epilogue': str(DataTypeTag[operation.element_epilogue]), + 'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor], + 'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor], + 'stages': str(operation.tile_description.stages), + 'align_a': str(1), # TRMM A's alignment is always 1 for no padding to work until we make zfill work with variable bytes + 'align_b': str(operation.B.alignment), + 'split_k_serial': 'false', + 'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation], + 'transform_a': ComplexTransformTag[operation.A.complex_transform] + } + + trmm_template = self.trmm_complex_template if operation.is_complex() else self.trmm_template + + return SubstituteTemplate(trmm_template, values) + +################################################################################################### + + +################################################################################################### +# +# Emitters functions for all targets +# +################################################################################################### + +class EmitTrmmConfigurationLibrary: + def __init__(self, operation_path, configuration_name): + self.configuration_name = configuration_name + self.configuration_path = os.path.join(operation_path, "%s.cu" % configuration_name).replace('\\', '/') + + self.instance_emitter = { + TrmmKind.Universal: EmitTrmmUniversalInstance, + } + + self.trmm_kind_wrappers = { + TrmmKind.Universal: 'TrmmOperation', + } + + self.instance_template = { + TrmmKind.Universal: """ +${compile_guard_start} + manifest.append(new ${trmm_kind}< + Operation_${operation_name} + >("${operation_name}")); +${compile_guard_end} +""" + } + + self.header_template = """ +/* + Generated by trmm_operation.py - Do not edit. 
+*/ + +/////////////////////////////////////////////////////////////////////////////////////////////////// +#include "cutlass/cutlass.h" +#include "cutlass/library/library.h" +#include "cutlass/library/manifest.h" + +#include "library_internal.h" +#include "trmm_operation.h" + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +""" + + self.initialize_function_template = """ + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace library { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +void initialize_${configuration_name}(Manifest &manifest) { + +""" + self.epilogue_template = """ + +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace library +} // namespace cutlass + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +""" + + def __enter__(self): + self.configuration_file = open(self.configuration_path, "w") + self.configuration_file.write(self.header_template) + + self.instance_definitions = [] + self.instance_wrappers = [] + + self.operations = [] + return self + + def emit(self, operation): + emitter = self.instance_emitter[operation.trmm_kind]() + + self.operations.append(operation) + + self.instance_definitions.append(emitter.emit(operation)) + + self.instance_wrappers.append(SubstituteTemplate(self.instance_template[operation.trmm_kind], { + 'configuration_name': self.configuration_name, + 'operation_name': operation.procedural_name(), + 'trmm_kind': self.trmm_kind_wrappers[operation.trmm_kind], + 'compile_guard_start': SubstituteTemplate(self.wmma_guard_start, {'sm_number': str(operation.arch)}) \ + if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else "", + 'compile_guard_end': "#endif" \ + if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else "" + })) + + def __exit__(self, exception_type, exception_value, traceback): + + # Write instance definitions in top-level namespace + for instance_definition in self.instance_definitions: + self.configuration_file.write(instance_definition) + + # Add wrapper objects within initialize() function + self.configuration_file.write(SubstituteTemplate(self.initialize_function_template, { + 'configuration_name': self.configuration_name + })) + + for instance_wrapper in self.instance_wrappers: + self.configuration_file.write(instance_wrapper) + + self.configuration_file.write(self.epilogue_template) + self.configuration_file.close() + +################################################################################################### diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docker/Dockerfile-cuda11.8-pytorch b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docker/Dockerfile-cuda11.8-pytorch new file mode 100644 index 0000000000000000000000000000000000000000..c573dfe7aa52951f0bcb1b66ce1892bce31c1038 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docker/Dockerfile-cuda11.8-pytorch @@ -0,0 +1,40 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################################# + +FROM nvcr.io/nvidia/pytorch:22.11-py3 + +RUN chmod ugo+rwx /home +RUN pip uninstall -y rmm +RUN pip install rmm-cu11 --extra-index-url=https://pypi.ngc.nvidia.com +ENV LD_LIBRARY_PATH=/usr/local/cuda/lib64:$LD_LIBRARY_PATH +ENV LIBRARY_PATH=/usr/local/cuda/lib64:$LIBRARY_PATH +ENV CUDA_INSTALL_PATH=/usr/local/cuda diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docker/Dockerfile-cuda12.0-pytorch b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docker/Dockerfile-cuda12.0-pytorch new file mode 100644 index 0000000000000000000000000000000000000000..a9a84bf36cf148ebd9f029ba7913d4a6c2e4ab16 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docker/Dockerfile-cuda12.0-pytorch @@ -0,0 +1,38 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +################################################################################################# + +FROM nvcr.io/nvidia/pytorch:23.01-py3 + +RUN chmod ugo+rwx /home +ENV LD_LIBRARY_PATH=/usr/local/cuda/lib64:$LD_LIBRARY_PATH +ENV LIBRARY_PATH=/usr/local/cuda/lib64:$LIBRARY_PATH +ENV CUDA_INSTALL_PATH=/usr/local/cuda diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docker/Dockerfile-cuda12.1-pytorch b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docker/Dockerfile-cuda12.1-pytorch new file mode 100644 index 0000000000000000000000000000000000000000..884472f5aea51f182028c647a03c2bc897f94165 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docker/Dockerfile-cuda12.1-pytorch @@ -0,0 +1,38 @@ +################################################################################################# +# +# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +################################################################################################# + +FROM nvcr.io/nvidia/pytorch:23.03-py3 + +RUN chmod ugo+rwx /home +ENV LD_LIBRARY_PATH=/usr/local/cuda/lib64:$LD_LIBRARY_PATH +ENV LIBRARY_PATH=/usr/local/cuda/lib64:$LIBRARY_PATH +ENV CUDA_INSTALL_PATH=/usr/local/cuda diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/.buildinfo b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/.buildinfo new file mode 100644 index 0000000000000000000000000000000000000000..6f014926597e05a074093a14edbf5ada4d825910 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/.buildinfo @@ -0,0 +1,4 @@ +# Sphinx build info version 1 +# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. +config: 4a5275a3b68094ba1d8a4b7e4c459321 +tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_modules/cutlass/emit/pytorch.html b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_modules/cutlass/emit/pytorch.html new file mode 100644 index 0000000000000000000000000000000000000000..fd3f85b817e9bdb0f43203ecf1df776f05bda931 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_modules/cutlass/emit/pytorch.html @@ -0,0 +1,923 @@ +cutlass.emit.pytorch - CUTLASS Python
Source code for cutlass.emit.pytorch

+#################################################################################################
+#
+# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+#################################################################################################
+
+"""
+Utilities for generating source for building a PyTorch CUDA extension that uses a CUTLASS kernel.
+If specified, the extension can be JIT compiled via PyTorch's ``cpp_extension.load`` method.
+
+Example usage with JIT compilation:
+
+.. highlight:: python
+.. code-block:: python
+
+    plan = cutlass.op.Gemm(element=torch.float32, layout=cutlass.LayoutType.RowMajor)
+    op = plan.construct()
+    mod = cutlass.emit.pytorch(op, 'cutlass_gemm', 80, jit=True)
+
+    # Generate inputs for the GEMM
+    A, B, C = [torch.ones((512, 512)).to('cuda') for _ in range(3)]
+
+    # Run the module
+    D = mod.run(A, B, C)
+
+
+Example usage without JIT compilation:
+
+.. highlight:: python
+.. code-block:: python
+
+    plan = cutlass.op.Gemm(element=torch.float32, layout=cutlass.LayoutType.RowMajor)
+    op = plan.construct()
+    cutlass.emit.pytorch(op, 'cutlass_gemm', 80, jit=False, sourcedir='output')
+
+After this call, the directory ``output`` contains ``setup.py``,
+``cutlass_gemm.cpp``, and ``cutlass_gemm_kernel.cu``. The module can be built from
+within ``output`` by running: ``TORCH_CUDA_ARCH_LIST="8.0" python setup.py develop --user``.
+
+The module can later be used in Python via:
+
+.. highlight:: python
+.. code-block:: python
+
+    import torch
+    import cutlass_gemm
+
+    # Generate inputs for the GEMM
+    A, B, C = [torch.ones((512, 512)).to('cuda') for _ in range(3)]
+
+    # Run the module
+    D = cutlass_gemm.run(A, B, C)
+"""
+
+import logging
+import os
+
+import cutlass_bindings
+
+from cutlass import CUTLASS_PATH, logger, swizzle
+from cutlass.backend.gemm_operation import GemmOperationGrouped, GemmOperationUniversal
+from cutlass.backend.library import ApiVersion
+from cutlass.backend.utils.software import CheckPackages, SubstituteTemplate
+from cutlass.emit import common
+
+torch_available = CheckPackages().check_torch()
+if torch_available:
+    import torch
+
+
+_PYTORCH_CUDA_TEMPLATE = common._CSTYLE_AUTOGEN_COMMENT + """
+#include <torch/extension.h>
+#include <ATen/ATen.h>
+
+#include "cutlass/cutlass.h"
+#include "cutlass/util/device_memory.h"
+
+${includes}
+${declaration}
+${impl}
+"""
+
+_PYTORCH_GEMM_CPP_TEMPLATE = common._CSTYLE_AUTOGEN_COMMENT + """
+#include <torch/extension.h>
+#include <ATen/ATen.h>
+#include <pybind11/stl.h>
+
+// CUDA forward declarations
+at::Tensor ${name}_kernel(const at::Tensor& A, const at::Tensor& B, at::optional<const at::Tensor> C=at::nullopt, float alpha=1.f, float beta=0.f);
+
+// C++ interface
+at::Tensor ${name}(const at::Tensor& A, const at::Tensor& B, at::optional<const at::Tensor> C=at::nullopt, float alpha=1.f, float beta=0.f) {
+  return ${name}_kernel(A, B, C, alpha, beta);
+}
+
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+  m.def("run", py::overload_cast<const at::Tensor&, const at::Tensor&, at::optional<const at::Tensor>, float, float>(&${name}), py::arg("A"), py::arg("B"), py::arg("C") = nullptr, py::arg("alpha") = 1.f, py::arg("beta") = 0.f);
+}
+"""
+
+_PYTORCH_GROUPED_GEMM_CPP_TEMPLATE = common._CSTYLE_AUTOGEN_COMMENT + """
+#include <torch/extension.h>
+#include <ATen/ATen.h>
+#include <pybind11/stl.h>
+
+// CUDA forward declarations
+std::vector<at::Tensor> ${name}_kernel(const std::vector<at::Tensor>& A, const std::vector<at::Tensor>& B, at::optional<const std::vector<at::Tensor>> C=at::nullopt, float alpha=1.f, float beta=0.f);
+
+// C++ interface
+std::vector<at::Tensor> ${name}(const std::vector<at::Tensor>& A, const std::vector<at::Tensor>& B, at::optional<const std::vector<at::Tensor>> C=at::nullopt, float alpha=1.f, float beta=0.f) {
+  return ${name}_kernel(A, B, C, alpha, beta);
+}
+
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+  m.def("run", py::overload_cast<const std::vector<at::Tensor>&, const std::vector<at::Tensor>&, at::optional<const std::vector<at::Tensor>>, float, float>(&${name}),
+        py::arg("A"), py::arg("B"), py::arg("C") = nullptr, py::arg("alpha") = 1.f, py::arg("beta") = 0.f);
+}
+"""
+
+_PYTORCH_GEMM_INCLUDES = {
+    ApiVersion.v2x: """
+#include "cutlass/gemm/device/gemm_universal.h"
+""",
+    ApiVersion.v3x: """
+#include "cutlass/gemm/device/gemm_universal_adapter.h"
+#include "cutlass/gemm/collective/collective_builder.hpp"
+#include "cutlass/gemm/device/gemm_universal_adapter.h"
+#include "cutlass/gemm/kernel/gemm_universal.hpp"
+#include "cutlass/epilogue/collective/default_epilogue.hpp"
+#include "cutlass/util/packed_stride.hpp"
+""",
+}
+
+_PYTORCH_GROUPED_GEMM_INCLUDES = """
+#include "cutlass/gemm/kernel/default_gemm_grouped.h"
+#include "cutlass/gemm/device/gemm_grouped.h"
+"""
+
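+# Maps cutlass_bindings element types to the torch C++ dtype constants used when the
+# generated wrappers allocate the output tensor D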
+_CUTLASS_TYPE_TO_TORCH_TYPE = {
+    cutlass_bindings.float16: "torch::kF16",
+    cutlass_bindings.float32: "torch::kF32",
+    cutlass_bindings.float64: "torch::kF64",
+    cutlass_bindings.int8: "torch::kI8",
+    cutlass_bindings.int32: "torch::kI32",
+}
+
+_PYTORCH_GEMM_IMPL_TEMPLATE_2x = (
+    common._CUTLASS_KERNEL_RUN_GEMM_2x
+    + """
+at::Tensor ${name}_kernel(const at::Tensor& A, const at::Tensor& B, at::optional<const at::Tensor> C, float alpha, float beta) {
+    int M = A.size(0);
+    int N = B.size(1);
+    int K = A.size(1);
+
+    typename DeviceKernel::ElementC* ptrC = (C == at::nullopt) ?
+                                            nullptr :
+                                            reinterpret_cast<typename DeviceKernel::ElementC*>(C->contiguous().data_ptr());
+    at::Tensor D = B.new_empty({M, N}, ${torch_type_C});
+
+    cutlass::Status status = ${name}_kernel_run(M, N, K,
+                                                reinterpret_cast<typename DeviceKernel::ElementA*>(A.contiguous().data_ptr()),
+                                                reinterpret_cast<typename DeviceKernel::ElementB*>(B.contiguous().data_ptr()),
+                                                ptrC,
+                                                reinterpret_cast<typename DeviceKernel::ElementC*>(D.contiguous().data_ptr()),
+                                                ElementCompute(alpha), ElementCompute(beta));
+
+    TORCH_CHECK(status == cutlass::Status::kSuccess, "CUTLASS kernel failed");
+    return D;
+}
+"""
+)
+
+_PYTORCH_GEMM_IMPL_TEMPLATE_3x = (
+    common._CUTLASS_KERNEL_RUN_GEMM_3x
+    + """
+bool hw_info_queried = false;
+cutlass::KernelHardwareInfo hw_info;
+
+at::Tensor ${name}_kernel(const at::Tensor& A, const at::Tensor& B, at::optional<const at::Tensor> C, float alpha, float beta) {
+    int M = A.size(0);
+    int N = B.size(1);
+    int K = A.size(1);
+    int L = 1;
+
+    // Query hardware info if we haven't already
+    if (!hw_info_queried) {
+        hw_info.device_id = 0;
+        hw_info.sm_count = cutlass::KernelHardwareInfo::query_device_multiprocessor_count(hw_info.device_id);
+        hw_info_queried = true;
+    }
+
+    typename DeviceKernel::ElementC* ptrC = (C == at::nullopt) ?
+                                            nullptr :
+                                            reinterpret_cast<typename DeviceKernel::ElementC*>(C->contiguous().data_ptr());
+    at::Tensor D = B.new_empty({M, N}, ${torch_type_C});
+
+    cutlass::Status status = ${name}_kernel_run(M, N, K, L,
+                                                reinterpret_cast<typename DeviceKernel::ElementA*>(A.contiguous().data_ptr()),
+                                                reinterpret_cast<typename DeviceKernel::ElementB*>(B.contiguous().data_ptr()),
+                                                ptrC,
+                                                reinterpret_cast<typename DeviceKernel::ElementC*>(D.contiguous().data_ptr()),
+                                                ElementCompute(alpha), ElementCompute(beta),
+                                                hw_info);
+
+    TORCH_CHECK(status == cutlass::Status::kSuccess, "CUTLASS kernel failed");
+    return D;
+}
+"""
+)
+
+
+_PYTORCH_GROUPED_GEMM_IMPL_TEMPLATE = (
+    common._CUTLASS_KERNEL_RUN_GROUPED_GEMM_2x
+    + """
+std::vector<at::Tensor> ${name}_kernel(const std::vector<at::Tensor>& A, const std::vector<at::Tensor>& B, at::optional<const std::vector<at::Tensor>> C, float alpha, float beta) {
+    size_t num = A.size();
+
+    // To avoid performing many small cudaMallocs and host-to-device copies,
+    // we serialize the grouped GEMM arguments on the host, allocate one
+    // large chunk of device memory, and perform a single cudaMemcpy to
+    // copy the host data to the device. Allocation overheads could be
+    // avoided by using a memory pool.
+
+    // Calculate the total size of the data to be copied from host to device
+    size_t total_size = sizeof(cutlass::gemm::GemmCoord) +
+                        sizeof(DeviceKernel::ElementA*) +
+                        sizeof(DeviceKernel::ElementB*) +
+                        sizeof(DeviceKernel::ElementC*) +
+                        sizeof(DeviceKernel::ElementC*) +
+                        sizeof(int64_t) +
+                        sizeof(int64_t) +
+                        sizeof(int64_t);
+    total_size *= num;
+
+    // num * sizeof(cutlass::gemm::GemmCoord) may leave the running total at a non-multiple
+    // of sizeof(DeviceKernel::ElementA*) (which is 8 bytes on a 64-bit system).
+    // To ensure that we don't end up having misaligned loads in the kernel,
+    // we pad to the nearest multiple of 8.
+    //
+    // Note that, even on a 32-bit system (for which sizeof(X*) will not equal
+    // sizeof(int64_t)), only padding between the list of GemmCoords and the
+    // list of ptr_As is sufficient because the set of four equal-length lists of pointers
+    // (A*, B*, C*, D*) will ensure that the first list of int64_ts will always
+    // start on a multiple of 8.
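+    // (If total_size is already a multiple of 8, this still adds a full 8 bytes of
+    // padding; the extra bytes are simply left unused.)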
+    int64_t padding = 8 - (total_size % 8);
+    total_size += padding;
+
+    uint8_t* host_data = new uint8_t[total_size];
+    cutlass::DeviceAllocation<uint8_t> device_data(total_size);
+
+    uint8_t* start = host_data;
+    cutlass::gemm::GemmCoord* problem_sizes_host = reinterpret_cast<cutlass::gemm::GemmCoord*>(start);
+
+    // Apply the padding after the list of GemmCoords
+    start += num * sizeof(cutlass::gemm::GemmCoord) + padding;
+
+    int64_t ptr_A_offset = start - host_data;
+    DeviceKernel::ElementA** ptr_A_host = reinterpret_cast<DeviceKernel::ElementA**>(start);
+    start += num * sizeof(DeviceKernel::ElementA*);
+
+    int64_t ptr_B_offset = start - host_data;
+    DeviceKernel::ElementB** ptr_B_host = reinterpret_cast<DeviceKernel::ElementB**>(start);
+    start += num * sizeof(DeviceKernel::ElementB*);
+
+    int64_t ptr_C_offset = start - host_data;
+    DeviceKernel::ElementC** ptr_C_host = reinterpret_cast<DeviceKernel::ElementC**>(start);
+    start += num * sizeof(DeviceKernel::ElementC*);
+
+    int64_t ptr_D_offset = start - host_data;
+    DeviceKernel::ElementC** ptr_D_host = reinterpret_cast<DeviceKernel::ElementC**>(start);
+    start += num * sizeof(DeviceKernel::ElementC*);
+
+    int64_t lda_offset = start - host_data;
+    int64_t* lda_host = reinterpret_cast<int64_t*>(start);
+    start += num * sizeof(int64_t);
+
+    int64_t ldb_offset = start - host_data;
+    int64_t* ldb_host = reinterpret_cast<int64_t*>(start);
+    start += num * sizeof(int64_t);
+
+    int64_t ldc_offset = start - host_data;
+    int64_t* ldc_host = reinterpret_cast<int64_t*>(start);
+    start += num * sizeof(int64_t);
+
+    std::vector<at::Tensor> D(num);
+
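+    // C is only read when beta is non-zero, so only gather C pointers in that case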
+    bool need_C = (C != at::nullopt) && (beta != 0.f);
+    for (size_t i = 0; i < num; ++i) {
+        int M = A[i].size(0);
+        int N = B[i].size(1);
+        int K = A[i].size(1);
+        *(problem_sizes_host + i) = {M, N, K};
+        *(ptr_A_host + i) = reinterpret_cast<typename DeviceKernel::ElementA*>(A[i].contiguous().data_ptr());
+        *(ptr_B_host + i) = reinterpret_cast<typename DeviceKernel::ElementB*>(B[i].contiguous().data_ptr());
+
+        if (need_C) {
+            *(ptr_C_host + i) = reinterpret_cast<typename DeviceKernel::ElementC*>(C->at(i).contiguous().data_ptr());
+        }
+        else {
+            *(ptr_C_host + i) = nullptr;
+        }
+
+        D[i] = B[i].new_empty({M, N}, ${torch_type_C});
+        *(ptr_D_host + i) = reinterpret_cast<typename DeviceKernel::ElementC*>(D[i].contiguous().data_ptr());
+
+        *(lda_host + i) = DeviceKernel::LayoutA::packed({M, K}).stride(0);
+        *(ldb_host + i) = DeviceKernel::LayoutB::packed({K, N}).stride(0);
+        *(ldc_host + i) = DeviceKernel::LayoutC::packed({M, N}).stride(0);
+    }
+
+    device_data.copy_from_host(host_data);
+
+    cutlass::Status status = ${name}_kernel_run(
+        num,
+        reinterpret_cast<cutlass::gemm::GemmCoord*>(device_data.get()),
+        reinterpret_cast<DeviceKernel::ElementA**>(device_data.get() + ptr_A_offset),
+        reinterpret_cast<DeviceKernel::ElementB**>(device_data.get() + ptr_B_offset),
+        reinterpret_cast<DeviceKernel::ElementC**>(device_data.get() + ptr_C_offset),
+        reinterpret_cast<DeviceKernel::ElementC**>(device_data.get() + ptr_D_offset),
+        reinterpret_cast<int64_t*>(device_data.get() + lda_offset),
+        reinterpret_cast<int64_t*>(device_data.get() + ldb_offset),
+        reinterpret_cast<int64_t*>(device_data.get() + ldc_offset),
+        reinterpret_cast<int64_t*>(device_data.get() + ldc_offset),
+        ElementCompute(alpha), ElementCompute(beta));
+
+    delete[] host_data;
+
+    TORCH_CHECK(status == cutlass::Status::kSuccess, "CUTLASS kernel failed");
+    return D;
+}
+"""
+)
+
+
+_PYTORCH_SETUP_PY = common._PYSTYLE_AUTOGEN_COMMENT + """
+from setuptools import setup
+from torch.utils.cpp_extension import BuildExtension, CUDAExtension
+
+setup(
+    name='${name}',
+    ext_modules=[
+        CUDAExtension('${name}', [
+            '${name}.cpp',
+            '${name}_kernel.cu',
+        ],
+        include_dirs=['${cutlass_path}/include', '${cutlass_path}/tools/util/include'],
+        extra_compile_args=['-std=c++17']
+        ),
+    ],
+    cmdclass={
+        'build_ext': BuildExtension
+    })
+
+"""
+
+
+def _generate_setup(name: str, sourcedir: str):
+    """
+    Generates a setup.py file for the extension
+
+    :param name: name of the module to generate
+    :type name: str
+    :param sourcedir: directory to which generated source files should be written
+    :type sourcedir: str
+    """
+    setup_py_file = os.path.join(sourcedir, "setup.py")
+    setup_source = SubstituteTemplate(
+        _PYTORCH_SETUP_PY, {"name": name, "cutlass_path": CUTLASS_PATH}
+    )
+    with open(setup_py_file, "w") as outfile:
+        outfile.write(setup_source)
+
+
+class _ArchListSetter:
+    """
+    Utility context manager for temporarily setting the value of the ``TORCH_CUDA_ARCH_LIST``
+    environment variable when building a PyTorch CUDA module.
+
+    ``TORCH_CUDA_ARCH_LIST`` is a space-delimited list of compute capabilities for which a PyTorch
+    CUDA module should be compiled.
+
+    For example, ``TORCH_CUDA_ARCH_LIST="7.0 8.0"`` would result in the inclusion of
+    ``-gencode=arch=compute_70,code=sm_70`` and ``-gencode=arch=compute_80,code=sm_80`` in the
+    compilation of the module.
+
+    This utility wraps the building of a PyTorch CUDA module with a setting of this environment
+    variable according to the current compute capability being targeted.
+
+    Example usage:
+
+    .. highlight:: python
+    .. code-block:: python
+
+        # Temporarily set TORCH_CUDA_ARCH_LIST="8.0"
+        with _ArchListSetter(80):
+            # Perform JIT compilation and loading of the module
+            mod = torch.utils.cpp_extension.load(...)
+
+    :param cc: compute capability
+    :type cc: int
+    """
+
+    _TORCH_CUDA_ARCH_LIST = "TORCH_CUDA_ARCH_LIST"
+
+    def __init__(self, cc: int):
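+        # Convert an integer compute capability (e.g., 80) into the dotted string
+        # form ("8.0") expected by TORCH_CUDA_ARCH_LIST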
+        self.cc_str = ".".join(list(str(cc)))
+
+    def __enter__(self):
+        """
+        Saves the old value of TORCH_CUDA_ARCH_LIST and sets it to the new value based on ``cc``
+        """
+        self.old_arch_list = os.getenv(_ArchListSetter._TORCH_CUDA_ARCH_LIST)
+        os.environ[_ArchListSetter._TORCH_CUDA_ARCH_LIST] = self.cc_str
+
+        return self
+
+    def __exit__(self, exc_type, exc_val, traceback):
+        """
+        Restores the old value of TORCH_CUDA_ARCH_LIST
+        """
+        if self.old_arch_list is None:
+            # The variable was not set before entering the context; remove it again
+            del os.environ[_ArchListSetter._TORCH_CUDA_ARCH_LIST]
+        else:
+            os.environ[_ArchListSetter._TORCH_CUDA_ARCH_LIST] = self.old_arch_list
+
+
+def _jit(name: str, cc: int, cpp_file: str, cuda_file: str):
+    """
+    JIT compiles and loads a PyTorch CUDA extension.
+
+    :param name: name of the module to generate
+    :type name: str
+    :param cc: compute capability of the device the module should target
+    :type cc: int
+    :param cpp_file: path to file containing extension's C++ interface
+    :type cpp_file: str
+    :param cuda_file: path to file containing extension's CUDA interface
+    :type cuda_file: str
+
+    :return: loaded PyTorch module
+    """
+
+    from torch.utils.cpp_extension import load
+
+    extra_cuda_cflags = ["-std=c++17"]
+    if cc == 90:
+        # PyTorch does not currently add the sm_90a target when compute capability
+        # 9.0 is set within TORCH_CUDA_ARCH_LIST. Thus, we manually add the sm_90a target.
+        extra_cuda_cflags.append("-gencode=arch=compute_90a,code=sm_90a")
+
+    with _ArchListSetter(cc):
+        jitmodule = load(
+            name,
+            [cpp_file, cuda_file],
+            extra_cuda_cflags=extra_cuda_cflags,
+            extra_include_paths=[
+                os.path.join(CUTLASS_PATH, "include"),
+                os.path.join(CUTLASS_PATH, "tools/util/include"),
+            ],
+            verbose=(logger.level == logging.DEBUG)
+        )
+    return jitmodule
+
+
+def _pytorch_gemm(op, name: str, cc: int, jit: bool = False, sourcedir: str = ""):
+    """
+    Generates source for building a PyTorch CUDA module that leverages the CUTLASS GEMM
+    specified by ``op``. If the ``jit`` parameter is set to true, the module is just-in-time
+    compiled, loaded, and returned.
+
+    :param op: operation to emit in the module
+    :param name: name of the module to generate
+    :type name: str
+    :param cc: compute capability of the device the module should target
+    :type cc: int
+    :param jit: whether the module should be just-in-time compiled
+    :type jit: bool
+    :param sourcedir: directory to which generated source files should be written
+    :type sourcedir: str
+
+    :return: loaded PyTorch module if ``jit=True`` or ``None`` otherwise
+    """
+    if sourcedir != "" and not os.path.isdir(sourcedir):
+        os.makedirs(sourcedir)
+
+    cuda_file = os.path.join(sourcedir, name + "_kernel.cu")
+    extra_kw = {}
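+    # Select the implementation template for the operation's API version. For the 2.x API,
+    # the kernel-argument snippet also differs between stream-K and standard swizzling.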
+    if op.api == ApiVersion.v3x:
+        impl_template = _PYTORCH_GEMM_IMPL_TEMPLATE_3x
+    else:
+        impl_template = _PYTORCH_GEMM_IMPL_TEMPLATE_2x
+        if isinstance(op.swizzling_functor, swizzle.ThreadblockSwizzleStreamK):
+            extra_kw["args"] = common._CUTLASS_KERNEL_ARGS_2x_STREAM_K
+        else:
+            extra_kw["args"] = common._CUTLASS_KERNEL_ARGS_2x
+    cuda_impl = SubstituteTemplate(impl_template, {"name": name, **extra_kw})
+    cuda_source = SubstituteTemplate(
+        _PYTORCH_CUDA_TEMPLATE,
+        {
+            "includes": _PYTORCH_GEMM_INCLUDES[op.api],
+            "declaration": op.rt_module.emit(),
+            "procedural_name": op.procedural_name(),
+            "impl": cuda_impl,
+            "torch_type_C": _CUTLASS_TYPE_TO_TORCH_TYPE[op.C.element],
+        },
+    )
+    with open(cuda_file, "w") as outfile:
+        outfile.write(cuda_source)
+
+    cpp_file = os.path.join(sourcedir, name + ".cpp")
+    cpp_source = SubstituteTemplate(
+        _PYTORCH_GEMM_CPP_TEMPLATE,
+        {"name": name, "description": f"CUTLASS {op.procedural_name()} GEMM"},
+    )
+    with open(cpp_file, "w") as outfile:
+        outfile.write(cpp_source)
+
+    _generate_setup(name, sourcedir)
+
+    if jit:
+        return _jit(name, cc, cpp_file, cuda_file)
+
+    return None
+
+
+def _pytorch_grouped_gemm(
+    op, name: str, cc: int, jit: bool = False, sourcedir: str = ""
+):
+    """
+    Generates source for building a PyTorch CUDA module that leverages the CUTLASS grouped GEMM
+    specified by ``op``. If the ``jit`` parameter is set to true, the module is just-in-time
+    compiled, loaded, and returned.
+
+    :param op: operation to emit in the module
+    :param name: name of the module to generate
+    :type name: str
+    :param cc: compute capability of the device the module should target
+    :type cc: int
+    :param jit: whether the module should be just-in-time compiled
+    :type jit: bool
+    :param sourcedir: directory to which generated source files should be written
+    :type sourcedir: str
+
+    :return: loaded PyTorch module if ``jit=True`` or ``None`` otherwise
+    """
+    if op.api != ApiVersion.v2x:
+        raise Exception("Grouped GEMM is currently only supported for CUTLASS 2.x")
+
+    if sourcedir != "" and not os.path.isdir(sourcedir):
+        os.makedirs(sourcedir)
+
+    cuda_file = os.path.join(sourcedir, name + "_kernel.cu")
+    cuda_impl = SubstituteTemplate(_PYTORCH_GROUPED_GEMM_IMPL_TEMPLATE, {"name": name})
+    cuda_source = SubstituteTemplate(
+        _PYTORCH_CUDA_TEMPLATE,
+        {
+            "includes": _PYTORCH_GROUPED_GEMM_INCLUDES,
+            "declaration": op.rt_module.emit(),
+            "procedural_name": op.procedural_name(),
+            "impl": cuda_impl,
+            "torch_type_C": _CUTLASS_TYPE_TO_TORCH_TYPE[op.C.element],
+        },
+    )
+    with open(cuda_file, "w") as outfile:
+        outfile.write(cuda_source)
+
+    cpp_file = os.path.join(sourcedir, name + ".cpp")
+    cpp_source = SubstituteTemplate(
+        _PYTORCH_GROUPED_GEMM_CPP_TEMPLATE,
+        {"name": name, "description": f"CUTLASS {op.procedural_name()} grouped GEMM"},
+    )
+    with open(cpp_file, "w") as outfile:
+        outfile.write(cpp_source)
+
+    _generate_setup(name, sourcedir)
+
+    if jit:
+        return _jit(name, cc, cpp_file, cuda_file)
+
+    return None
+
+
+
[docs]def pytorch(op, name: str, cc: int, jit: bool = False, sourcedir: str = ""): + """ + Generates source for building a PyTorch CUDA module that leverages the CUTLASS kernel + specified by ``op``. If the ``jit`` parameter is set to true, the module is just-in-time + compiled, loaded, and returned. + + The result of this method is files within ``sourcedir`` that can be used for building + a PyTorch module. + + :param op: operation to emit in the module + :param name: name of the module to generate + :type name: str + :param cc: compute capability of the device the module should target + :type cc: int + :param jit: whether the module should be just-in-time compiled + :type jit: bool + :param sourcedir: directory to which generated source files should be written + :type sourcedir: str + + :return: loaded PyTorch module (if ``jit=True``) or None + """ + device_op = op.device_op() + if isinstance(op, GemmOperationUniversal): + return _pytorch_gemm(device_op, name, cc, jit, sourcedir) + elif isinstance(op, GemmOperationGrouped): + return _pytorch_grouped_gemm(device_op, name, cc, jit, sourcedir) + else: + raise Exception( + f"Operation type {type(op)} is not currently supported for PyTorch emission." + )
\ No newline at end of file diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_modules/cutlass/epilogue.html b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_modules/cutlass/epilogue.html new file mode 100644 index 0000000000000000000000000000000000000000..da5e3ac2e6610edef4fbecc0dd86af64accbd43b --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_modules/cutlass/epilogue.html @@ -0,0 +1,391 @@ +cutlass.epilogue - CUTLASS Python
Source code for cutlass.epilogue

+#################################################################################################
+#
+# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+#################################################################################################
+
+"""
+Registry of elementwise epilogues
+
+Elementwise epilogues can be added to many CUTLASS kernels in the CUTLASS Python interface via
+code like the following for GEMM:
+
+.. highlight:: python
+.. code-block:: python
+
+    plan = cutlass.op.Gemm(element=cutlass.DataType.f32, layout=cutlass.LayoutType.RowMajor)
+    plan.activation = cutlass.epilogue.relu
+"""
+
+from cutlass.backend import epilogue
+
+gelu = epilogue.gelu
+hardswish = epilogue.hardswish
+identity = epilogue.identity
+leaky_relu = epilogue.leaky_relu
+relu = epilogue.relu
+sigmoid = epilogue.sigmoid
+silu = epilogue.silu
+tanh = epilogue.tanh
+
+
+_activations = [gelu, hardswish, identity, leaky_relu, relu, sigmoid, silu, tanh]
+
+
+
[docs]def get_activations() -> list: + """ + Returns a list of available activation functions + + :return: list of available activation functions + :rtype: list + """ + return _activations
+ + +
[docs]def get_activation_epilogue( + activation, + element_output, + elements_per_access, + element_accumulator, + element_compute, +): + """ + Return an epilogue corresponding to the activation function, data types, and alignment + used in the kernel + + :param activation: elementwise activation function to use + :param element_output: data type of the output + :param elements_per_access: alignment of operand C of the kernel + :type elements_per_access: int + :param element_accumulator: data type of the accumulated output C + :param element_compute: data type in which compute operations should be performed + + :return: epilogue functor + """ + if activation not in _activations: + raise Exception( + f"Unsupported activation type {activation}. Available activations are: {_activations}" + ) + + if activation == identity: + return epilogue.LinearCombination( + element_output, elements_per_access, element_accumulator, element_compute + ) + else: + return epilogue.LinearCombinationGeneric( + activation(element_compute), + element_output, + elements_per_access, + element_accumulator, + element_compute, + )
\ No newline at end of file diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_modules/cutlass/library_defaults.html b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_modules/cutlass/library_defaults.html new file mode 100644 index 0000000000000000000000000000000000000000..7618df86325ca408e3b52fcf1b8608983a890d4f --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_modules/cutlass/library_defaults.html @@ -0,0 +1,729 @@ +cutlass.library_defaults - CUTLASS Python
Source code for cutlass.library_defaults

+#################################################################################################
+#
+# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+#################################################################################################
+
+"""
+Classes containing valid operations for a given compute capability and data types.
+"""
+
+import logging
+from cuda import __version__
+
+# Strip any additional information from the CUDA version
+_cuda_version = __version__.split("rc")[0]
+
+# Imports from CUTLASS profiler generator and manifest scripts
+import generator as prof_generator
+import manifest as prof_manifest
+
+import cutlass
+from cutlass.utils.check import valid_stage_count
+from cutlass.utils.datatypes import td_from_profiler_td, td_from_profiler_op, has_binding_type
+
+
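+# Compute capabilities for which the profiler generator script provides a GenerateSM<cc>
+# function; these are the architectures for which default kernels can be enumerated below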
+_generator_ccs = [50, 60, 61, 70, 75, 80, 90]
+
+
+
[docs]class KernelsForDataType: + """ + Container class for keeping track of kernels that correspond to a particular combination + of data types for operands A, B, and accumulator + """ + + def __init__(self, datatype_comb: tuple, layout_comb: tuple): + self.datatype_comb = datatype_comb + self.layout_comb = layout_comb + + # Dictionary mapping from alignment (int) to a list of kernels that fit the alignment + # constraint for the data type combination + self.kernels_by_alignment = {} + +
[docs] def add(self, operation): + """ + Add an operation to the list of supported kernels + """ + alignment = operation.A.alignment + if alignment not in self.kernels_by_alignment: + self.kernels_by_alignment[alignment] = [] + self.kernels_by_alignment[alignment].append(operation)
+ + @property + def alignments(self): + """ + Returns an unsorted list of alignments supported by this data type combination + + :return: unsorted list of alignments supported by this data type combination + :rtype: list + """ + return list(self.kernels_by_alignment.keys()) + + @property + def all_operations(self): + """ + Returns a list of all operations supported by this data type combination + + :return: list of all operations supported by this data type combination + :rtype: list + """ + ops = [] + for _, alignment_ops in self.kernels_by_alignment.items(): + ops.extend(alignment_ops) + return ops + +
[docs] def operations(self, alignment: int): + """ + Returns operations satisfying the alignment constraint indicated by `alignment` + + :param alignment: alignment constraint of operations to return + :type alignment: int + + :return: list of operations + :rtype: list + """ + if alignment not in self.kernels_by_alignment: + raise Exception( + f"No operations of alignment {alignment} found for data type and layout " + f"combination {self.datatype_comb} {self.layout_comb}" + ) + return self.kernels_by_alignment[alignment]
+ +
[docs] def find_alignment(self, shape: tuple, layout: cutlass.LayoutType) -> int: + """ + Returns the most preferable alignment for a given shape and layout + + :param shape: extent of each dimension of the tensor + :type shape: tuple + :param layout: layout of the tensor + :type layout: cutlass.LayoutType + + :return: maximum alignment supported by the data type combination and tensor size + :rtype: int + """ + # Determine the leading dimension of the shape + if layout == cutlass.LayoutType.ColumnMajor: + ld = shape[0] + elif layout == cutlass.LayoutType.RowMajor: + ld = shape[1] + else: + raise Exception(f"Unexpected or unsupported layout {layout}") + + for alignment in sorted(list(self.kernels_by_alignment.keys()), reverse=True): + if ld % alignment == 0: + return alignment + + # Default to alignment of 1 if no others match + return 1
+ +
[docs] def sort(self): + """ + Sorts each list of kernels in `kernels_by_alignment` in descending order of threadblock shape + """ + key = lambda op: ( + op.tile_description.threadblock_shape[0] + * op.tile_description.threadblock_shape[1] + * op.tile_description.threadblock_shape[2] + ) + for alignment in self.kernels_by_alignment.keys(): + self.kernels_by_alignment[alignment].sort(key=key, reverse=True)
+ + +
[docs]class ArchOptions: + """ + Structure for keeping track of kernels available on a given compute capability + + :param target_cc: compute capability of the device on which kernels will be run + :type target_cc: int + :param kernel_cc: compute capability of the kernels to generate + :type kernel_cc: int + :param operation_kind: type of operation to register + :type operation_kind: cutlass.OperationKind + :param gemm_kinds: types of GEMM operations that can be included + :type gemm_kinds: list + :param allowed_math_operations: types of primitive math operations allowed + :type allowed_math_operations: list + """ + + def __init__( + self, + target_cc: int, + kernel_cc: int, + operation_kind: cutlass.OperationKind, + gemm_kinds: list, + allowed_math_operations: list = [ + cutlass.MathOperation.multiply_add, + cutlass.MathOperation.multiply_add_saturate, + ] + ): + self.cc = kernel_cc + + # Dictionary with following structure: + # Key: OpcodeClass + # Value: Dictionary with the following structure: + # Key: tuple of ((DataType, DataType, DataType), (LayoutType, LayoutType, LayoutType), + # representing ((element_a, element_b, element_accumulator), (layout_a, layout_b)) + # Value: KernelsForDataType + self.operations_by_opclass = {} + self.op_class = None + self.allowed_math_operations = allowed_math_operations + + # Identify the method within CUTLASS generator script that generates kernel + # descriptions for the target CC + generate_function_name = "GenerateSM" + str(kernel_cc) + if not hasattr(prof_generator, generate_function_name): + cutlass.logger.warning(f"No generator found for architecture {kernel_cc}") + return + generate_function = getattr(prof_generator, generate_function_name) + + # Initialize a default manifest and populate it with valid kernel descriptions + # for the target CC + args = [ + "--kernels=all", + f"--log-level={logging.getLevelName(cutlass.logger.level)}" + ] + manifest_args = prof_generator.define_parser().parse_args(args) + manifest = prof_manifest.Manifest(manifest_args) + generate_function(manifest, _cuda_version) + + if operation_kind not in manifest.operations: + # No kernels generated for this architecture, this could be because the CUDA + # toolkit is insufficient to support operations in this CC + cutlass.logger.warning(f"No operations of type {operation_kind} found for CC {kernel_cc}") + return + + # Iterate through the available operations for this operation kind and + # find available opclasses and data types + for name, op_list in manifest.operations[operation_kind].items(): + for op in op_list: + if op.gemm_kind not in gemm_kinds: + continue + + mi = op.tile_description.math_instruction + if mi.math_operation not in self.allowed_math_operations: + continue + + datatype_comb = (mi.element_a, mi.element_b, mi.element_accumulator) + + # Skip any data types that do not currently have conversions via cutlass_bindings + if False in [has_binding_type(elt) for elt in datatype_comb]: + continue + + # Prune operations that don't fit in shared memory + td = td_from_profiler_op(op) + if not valid_stage_count(target_cc, td)[0]: + continue + + if mi.opcode_class not in self.operations_by_opclass: + self.operations_by_opclass[mi.opcode_class] = {} + + datatype_comb = (mi.element_a, mi.element_b, mi.element_accumulator) + layout_comb = (op.A.layout, op.B.layout) + + # Register TF32 kernels as F32 to enable F32 -> TF32 conversion + TF32 Tensor Core operations + if datatype_comb == (cutlass.DataType.tf32, cutlass.DataType.tf32, cutlass.DataType.f32): + # TF32 kernels 
only supported on SM80 and beyond + if self.cc < 80: + continue + elif self.cc == 90: + if (op.A.element != cutlass.DataType.f32 + or op.B.element != cutlass.DataType.f32 + or op.C.element != cutlass.DataType.f32): + continue + + datatype_comb = (cutlass.DataType.f32, cutlass.DataType.f32, cutlass.DataType.f32) + + opclass_dict = self.operations_by_opclass[mi.opcode_class] + key = (datatype_comb, layout_comb) + if key not in opclass_dict: + opclass_dict[key] = KernelsForDataType(datatype_comb, layout_comb) + opclass_dict[key].add(op) + + # Set the default opclass to TensorOp, if available. Otherwise default to SIMT + if cutlass.OpcodeClass.TensorOp in self.operations_by_opclass: + self.op_class = cutlass.OpcodeClass.TensorOp + else: + self.op_class = cutlass.OpcodeClass.Simt + + # The profiler's generator may generate only a limited set of combinations of operands for SIMT kernels. + # Here, we generate additional versions via a generic TileDescription. + if cutlass.OpcodeClass.Simt not in self.operations_by_opclass: + self.operations_by_opclass[cutlass.OpcodeClass.Simt] = {} + + types = [ + (cutlass.DataType.s8, cutlass.DataType.s8, cutlass.DataType.s8), + (cutlass.DataType.s8, cutlass.DataType.s8, cutlass.DataType.s32), + (cutlass.DataType.f16, cutlass.DataType.f16, cutlass.DataType.f16), + (cutlass.DataType.f16, cutlass.DataType.f16, cutlass.DataType.f32), + (cutlass.DataType.f32, cutlass.DataType.f32, cutlass.DataType.f32), + (cutlass.DataType.f64, cutlass.DataType.f64, cutlass.DataType.f64), + ] + + layouts = [ + (cutlass.LayoutType.RowMajor, cutlass.LayoutType.RowMajor), + (cutlass.LayoutType.RowMajor, cutlass.LayoutType.ColumnMajor), + (cutlass.LayoutType.ColumnMajor, cutlass.LayoutType.RowMajor), + (cutlass.LayoutType.ColumnMajor, cutlass.LayoutType.ColumnMajor), + ] + alignment = 1 + epilogue_functor = cutlass.EpilogueFunctor.LinearCombination + swizzling_functor = cutlass.SwizzlingFunctor.Identity8 + for type_comb in types: + for layout_comb in layouts: + comb = (type_comb, layout_comb) + if comb in self.operations_by_opclass[cutlass.OpcodeClass.Simt]: + continue + + A = cutlass.TensorDescription(type_comb[0], layout_comb[0], alignment) + B = cutlass.TensorDescription(type_comb[1], layout_comb[1], alignment) + C = cutlass.TensorDescription(type_comb[2], cutlass.LayoutType.ColumnMajor, alignment) + math_inst = cutlass.MathInstruction( + [1, 1, 1], + type_comb[0], + type_comb[1], + type_comb[2], + cutlass.OpcodeClass.Simt, + cutlass.MathOperation.multiply_add + ) + + td = cutlass.TileDescription( + [128, 128, 8], 2, [4, 2, 1], math_inst, 50, 1024) + + # Prune operations that don't fit in shared memory + if not valid_stage_count(target_cc, td_from_profiler_td(td))[0]: + continue + + new_operation = prof_manifest.GemmOperation( + cutlass.GemmKind.Universal, td.minimum_compute_capability, + td, A, B, C, type_comb[2], epilogue_functor, swizzling_functor) + + new_kernels = KernelsForDataType(type_comb, layout_comb) + new_kernels.add(new_operation) + self.operations_by_opclass[cutlass.OpcodeClass.Simt][comb] = new_kernels + + # Sort all operations + for oc in self.operations_by_opclass.keys(): + for comb in self.operations_by_opclass[oc].keys(): + self.operations_by_opclass[oc][comb].sort() + +
[docs] def opclass_supports_combination( + self, op_class: cutlass.OpcodeClass, datatype_comb: tuple, layout_comb: tuple + ) -> bool: + """ + Returns whether the provided operation class supports the provided data type and layout combination + + :param op_class: operation class to consider + :type op_class: cutlass.OpcodeClass + :param datatype_comb: tuple of data types for (element_A, element_B, element_accumulator) + :type datatype_comb: tuple[cutlass.DataType] + :param layout_comb: tuple of layouts for (layout_A, layout_B) + :type layout_comb: tuple[cutlass.LayoutType] + + :return: whether the provided operation class supports the provided data type and layout combination + :rtype: bool + """ + if op_class not in self.operations_by_opclass: + raise Exception(f"Unexpected or unsupported operation class {op_class}") + + return (datatype_comb, layout_comb) in self.operations_by_opclass[op_class]
+ +
[docs] def supporting_opclasses( + self, + element_a: cutlass.DataType, + element_b: cutlass.DataType, + element_accumulator: cutlass.DataType, + layout_a: cutlass.LayoutType, + layout_b: cutlass.LayoutType, + ) -> set: + """ + Returns a set of operation classes that support the provided data type combination + + :param element_a: data type of operand A + :type element_a: cutlass.DataType + :param element_b: data type of operand B + :type element_b: cutlass.DataType + :param element_accumulator: data type of accumulator + :type element_accumulator: cutlass.DataType + :param layout_a: layout of operand A + :type layout_a: cutlass.LayoutType + :param layout_b: layout of operand B + :type layout_b: cutlass.LayoutType + + :return: set of operation classes that support the provided data type combination + :rtype: set + """ + supporting_op_classes = set() + datatype_comb = (element_a, element_b, element_accumulator) + layout_comb = (layout_a, layout_b) + + for op_class in self.operations_by_opclass.keys(): + if self.opclass_supports_combination(op_class, datatype_comb, layout_comb): + supporting_op_classes.add(op_class) + return supporting_op_classes
+ +
[docs] def operations( + self, + op_class: cutlass.OpcodeClass, + element_a: cutlass.DataType, + element_b: cutlass.DataType, + element_accumulator: cutlass.DataType, + layout_a: cutlass.LayoutType, + layout_b: cutlass.LayoutType, + ) -> KernelsForDataType: + """ + Returns the kernels that the provided operation class supports for the provided data type and layout combination + + :param op_class: operation class to consider + :type op_class: cutlass.OpcodeClass + :param element_a: data type of operand A + :type element_a: cutlass.DataType + :param element_b: data type of operand B + :type element_b: cutlass.DataType + :param element_accumulator: data type of accumulator + :type element_accumulator: cutlass.DataType + :param layout_a: layout of operand A + :type layout_a: cutlass.LayoutType + :param layout_b: layout of operand B + :type layout_b: cutlass.LayoutType + + :return: container of kernels by alignment supported by the provided combination of parameters + :rtype: KernelsForDataType + """ + datatype_comb = (element_a, element_b, element_accumulator) + layout_comb = (layout_a, layout_b) + if not self.opclass_supports_combination(op_class, datatype_comb, layout_comb): + raise Exception( + f"Data type and layout combination {datatype_comb}, {layout_comb} " + f"is not supported by opcode class {op_class} on CC {self.cc}." + ) + return self.operations_by_opclass[op_class][(datatype_comb, layout_comb)]
+ + +
[docs]class OptionRegistry: + """ + Container of all architecture-specific options + + :param target_cc: compute capability of the device on which operations will be run + :type target_cc: int + """ + + def __init__(self, target_cc: int): + self.registry = {} + + gemm_kinds = [cutlass.GemmKind.Universal, cutlass.GemmKind.Universal3x] + # Construct options for each CC + for kernel_cc in _generator_ccs: + self.registry[kernel_cc] = ArchOptions(target_cc, kernel_cc, cutlass.OperationKind.Gemm, gemm_kinds) + +
[docs] def options_for_cc(self, cc: int) -> ArchOptions: + return self.registry.get(cc, None)
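As a brief illustration of how the registry above might be queried, the following is a minimal sketch, not part of the library source. It assumes a CUDA-capable environment in which the CUTLASS Python bindings and profiler generators are importable; the f16/f32 row-major combination is chosen purely as an example.

.. code-block:: python

    registry = OptionRegistry(target_cc=80)
    opts = registry.options_for_cc(80)

    # Which opcode classes can serve an f16 x f16 -> f32 row-major GEMM?
    opclasses = opts.supporting_opclasses(
        cutlass.DataType.f16, cutlass.DataType.f16, cutlass.DataType.f32,
        cutlass.LayoutType.RowMajor, cutlass.LayoutType.RowMajor)

    # Kernels (grouped by alignment) available for the TensorOp opcode class
    kernels = opts.operations(
        cutlass.OpcodeClass.TensorOp,
        cutlass.DataType.f16, cutlass.DataType.f16, cutlass.DataType.f32,
        cutlass.LayoutType.RowMajor, cutlass.LayoutType.RowMajor)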
+
+
+
+ +
+ +
+
\ No newline at end of file
diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_modules/cutlass/op/gemm.html b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_modules/cutlass/op/gemm.html
new file mode 100644
index 0000000000000000000000000000000000000000..8214706b93a4416022e387f41aa108ad2e7366b0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_modules/cutlass/op/gemm.html
@@ -0,0 +1,980 @@
+cutlass.op.gemm - CUTLASS Python

Source code for cutlass.op.gemm

+#################################################################################################
+#
+# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+#################################################################################################
+
+"""
+    Ease-of-use interface for constructing, compiling, and running GEMMs.
+
+    The ``Gemm`` interface is meant to allow one to easily instantiate, compile, and run
+    GEMM operations in CUTLASS via Python, without specifying many configuration parameters.
+    Under the hood, the interface will select sensible default parameters for the many template
+    parameters for CUTLASS GEMMs.
+
+    Note: optimal performance is not to be expected from this interface. To achieve optimal
+    performance, one should specify and tune each configuration parameter.
+
+    The simplest example of using this interface is the following:
+
+    .. highlight:: python
+    .. code-block:: python
+
+        # A, B, C, and D are torch/numpy/cupy tensor objects
+        plan = cutlass.op.Gemm(A, B, C, D)
+        plan.run()
+
+
+    One can also use the interface by specifying data types of operands at construction
+    and using different tensor objects with these data types at runtime:
+
+    .. highlight:: python
+    .. code-block:: python
+
+        # The following is shorthand for:
+        #        cutlass.op.Gemm(element_A=torch.float32, element_B=torch.float32,
+        #                        element_C=torch.float32, element_D=torch.float32,
+        #                        element_accumulator=torch.float32,
+        #                        layout=cutlass.LayoutType.RowMajor)
+        plan = cutlass.op.Gemm(element=torch.float32, layout=cutlass.LayoutType.RowMajor)
+
+        A0 = torch.rand((128, 256), device='cuda')
+        B0 = torch.rand((256, 64), device='cuda')
+        C0 = torch.zeros((128, 64), device='cuda')
+        D0 = torch.zeros((128, 64), device='cuda')
+        plan.run(A0, B0, C0, D0)
+
+        A1 = torch.rand((32, 128), device='cuda')
+        B1 = torch.rand((128, 256), device='cuda')
+        C1 = torch.zeros((32, 256), device='cuda')
+        D1 = torch.zeros((32, 256), device='cuda')
+        plan.run(A1, B1, C1, D1)
+
+    The interface additionally enables one to decouple the compilation of the underlying CUTLASS
+    kernel from its execution:
+
+    .. highlight:: python
+    .. code-block:: python
+
+        plan = cutlass.op.Gemm(element=np.float32, layout=cutlass.LayoutType.RowMajor)
+        plan.compile()
+
+        # Do other work...
+
+        plan.run(A0, B0, C0, D0)
+
+        # Do other work...
+
+        plan.run(A1, B1, C1, D1)
+
+    Elementwise activation functions are easily fused to the GEMM via the interface:
+
+    .. highlight:: python
+    .. code-block:: python
+
+        plan = cutlass.op.Gemm(element=np.float32, layout=cutlass.LayoutType.RowMajor)
+        plan.activation = cutlass.epilogue.relu
+
+    Operations can also be run asynchronously:
+
+    .. highlight:: python
+    .. code-block:: python
+
+        plan = cutlass.op.Gemm(element=np.float32, layout=cutlass.LayoutType.RowMajor)
+        args = plan.run()
+
+        # Do other work...
+
+        args.sync()
+"""
+
+import cutlass_bindings
+
+import cutlass
+from cutlass import epilogue, swizzle
+from cutlass.backend import compiler
+from cutlass.backend.gemm_operation import GemmArguments, GemmOperationUniversal
+from cutlass.backend.library import TensorDescription, TileDescription
+from cutlass.op.op import OperationBase
+from cutlass.utils import check, datatypes
+
+
+
[docs]class Gemm(OperationBase): + """ + Constructs a ``Gemm`` object. + + The data types and layouts of operands A, B, and C, along with the data type of output D + and that used for accumulation, are bound to the ``Gemm`` object throughout its lifetime -- + these are not to be changed after a ``Gemm`` has been constructed. + + The constructor has optional parameters for flexibly setting these parameters. The following + constructors are equivalent: + + .. highlight:: python + .. code-block:: python + + # Use F32 for A, B, C, D, and accumulation. All operands are row major. + + # Use the generic ``element`` and ``layout`` parameters to concisely set all data types and layouts + # for operands to the same values. + Gemm(element=cutlass.DataType.f32, layout=cutlass.LayoutType.RowMajor) + + # Explicitly specify the data types to use for A, B, C, and D. Use the generic ``layout``. + Gemm(element_A=cutlass.DataType.f32, element_B=cutlass.DataType.f32, element_C=cutlass.DataType.f32, + element_D=cutlass.DataType.f32, layout=cutlass.LayoutType.RowMajor) + + # Set the data types and elements from existing tensors. Note that one can use different tensors when + # executing GEMM via the ``run()`` method than passed in here (though those passed in to ``run()`` must + # have the same data type and layout as those passed in here). + # A, B, C, and D are row-major torch.Tensor objects of type torch.float32 + Gemm(A=A, B=B, C=C, D=D) + + # Use the generic ``element`` and explicitly specify the layouts to use for A, B, and C (layout of D is + # the same as that for D, at present) + Gemm(element=cutlass.DataType.f32, layout_A=cutlass.LayoutType.RowMajor, + layout_B=cutlass.LayoutType.RowMajor, layout_C=cutlass.LayoutType.RowMajor) + + # Explicitly specify the data type and layout for only some of A, B, C, and D. Unspecified data types + # and layouts will inherit those passed in via the generic ``element`` and ``layout`` + Gemm(element_A=cutlass.DataType.f32, layout_B=cutlass.LayoutType.RowMajor, + element=cutlass.DataType.f32, layout=cutlass.LayoutType.RowMajor) + + The order of precedence for the setting of the data type and layout for a given operand/output is as follows: + 1) If the tensor type is specified (e.g., ``A``), use the data type and layout inferred from this tensor + 2) Otherwise, if the data type/layout (e.g., ``element_A``, ``layout_A``) is specified, use those + 3) Otherwise, use the generic values (e.g., ``element``, ``layout``) + + :param cc: compute capability of device for which kernels should be compiled. For example, if running on H100, this should be set to 90 + :type cc: int + :param kernel_cc: compute capability of kernels to generate. 
For example, if running on SM90, but desiring to use a CUTLASS 2.x-style Ampere kernel, this should be set to 80 + :type kernel_cc: int + :param A: tensor representing data type and layout of operand A + :param B: tensor representing data type and layout of operand B + :param C: tensor representing data type and layout of operand C + :param D: tensor representing data type and layout of operand D + :param alpha: scalar paramter alpha from GEMM computation that scales the product of operands A and B + :param beta: scalar parameter beta from GEMM operation that scales operand C + :param element_accumulator: data type to be used in accumulation of the product of operands A and B + :type element_accumulator: cutlass.DataType + :param element: generic data type to be used for operands A, B, C, D, as well as the accumulation data type + :type element: cutlass.DataType + :param layout: generic layout type to be used for operands A, B, C, and D + :type layout: cutlass.LayoutType + :param element_A: data type to be used for operand A + :type element_A: cutlass.DataType + :param element_B: data type to be used for operand B + :type element_B: cutlass.DataType + :param element_C: data type to be used for operand C + :type element_C: cutlass.DataType + :param element_D: data type to be used for operand D + :type element_D: cutlass.DataType + :type layout_A: layout of operand A + :param layout_A: cutlass.LayoutType + :type layout_B: layout of operand B + :param layout_B: cutlass.LayoutType + :type layout_C: layout of operand C + :param layout_C: cutlass.LayoutType + :type layout_D: layout of operand D + :param layout_D: cutlass.LayoutType + """ + + def __init__( + self, A=None, B=None, C=None, D=None, + alpha=1.0, beta=0.0, element_accumulator=None, + element=None, layout=None, + element_A=None, element_B=None, element_C=None, element_D=None, + layout_A=None, layout_B=None, layout_C=None, + cc: int = None, kernel_cc: int = None + ): + super().__init__(cc=cc, kernel_cc=kernel_cc) + self.name = "gemm" + self.compiled = False + + elements = [] + layouts = [] + + # Check that at least one of the following is set for each tensor (illustrated assuming tensor A): + # ``A``, ``element_A``, ``element`` and ``A``, ``layout_A``, ``layout`` + for elt, lay, tens, name in zip([element_A, element_B, element_C, element_D], + [layout_A, layout_B, layout_C, layout_C], + [A, B, C, D], + ["A", "B", "C", "D"]): + if elt is not None and tens is not None: + raise Exception(f'Must not specify both element_{name} and tensor {name}') + if lay is not None and tens is not None: + raise Exception(f'Must not specify both layout_{name} and tensor {name}') + if elt is None and tens is None and element is None: + raise Exception(f'Must specify one of element_{name}, tensor {name}, or generic element.') + if lay is None and tens is None and layout is None: + raise Exception(f'Must specify one of layout_{name}, tensor {name}, or generic layout.') + + elt_to_set = None + lay_to_set = None + if tens is not None: + elt_to_set, lay_to_set = datatypes.get_datatype_and_layout(tens) + else: + elt_to_set = elt if elt is not None else element + lay_to_set = lay if lay is not None else layout + + elements.append(datatypes.library_type(elt_to_set)) + layouts.append(datatypes.library_layout(lay_to_set)) + + self._element_a, self._element_b, self._element_c, self._element_d = elements + self._layout_a, self._layout_b, self._layout_c, self._layout_d = layouts + + if element_accumulator is None: + self._element_accumulator = self._element_c + else: + 
self._element_accumulator = datatypes.library_type(element_accumulator) + + self.A = A + self.B = B + self.C = C + self.D = D + + self.alpha = alpha + self.beta = beta + + self.epilogue_functor = None + self.op_class = None + + self._reset_operations() + + self._swizzling_functor = cutlass.swizzle.IdentitySwizzle1 + + def _reset_operations(self, reset_epilogue: bool = True): + # Set the default op class + datatype_comb = (self._element_a, self._element_b, self._element_accumulator) + layout_comb = (self._layout_a, self._layout_b) + self.possible_op_classes = self.options.supporting_opclasses( + self._element_a, self._element_b, self._element_accumulator, + self._layout_a, self._layout_b) + + if cutlass.OpcodeClass.TensorOp in self.possible_op_classes: + self.opclass = cutlass.OpcodeClass.TensorOp + elif cutlass.OpcodeClass.Simt in self.possible_op_classes: + self.opclass = cutlass.OpcodeClass.Simt + else: + raise Exception(f'No kernel configuration found for supported data type and layout ' + f'combination {datatype_comb}x{layout_comb}') + + if reset_epilogue: + self._reset_epilogue_functor_activation(epilogue.identity) + + def _reset_epilogue_functor_activation(self, activation): + if self.epilogue_functor is None: + if self.op_class == cutlass.OpcodeClass.Simt: + elements_per_access = 1 + else: + elements_per_access = 128 // cutlass.DataTypeSize[self._element_c] + else: + elements_per_access = self.epilogue_functor.epilogue_vector_length + + if not self.specified_kernel_cc: + if self.current_cc == 90 and activation != epilogue.identity: + # CUTLASS 3.0 kernels currently only support identity activation. If one requests a non-identity activation, + # revert to using a CUTLASS 2.x kernel by using SM80-tagged kernels. + cutlass.logger.warning("Reverting to using SM80-tagged kernel. Opclass may change.") + self._reset_options(80) + self._reset_operations(reset_epilogue=False) + elif (self.cc == 90 and self.current_cc != 90 and activation == epilogue.identity): + # SM80 fallback kernels are currently used. Since an identity activation is requested, + # we can switch back to using SM90 kernels. + self._reset_options(90) + self._reset_operations(reset_epilogue=False) + else: + if self.current_cc == 90 and activation != epilogue.identity: + raise Exception("Epilogues with elementwise fusion are not currently supported " + "in the Python interface for 3.x kernels. 
To use 2.x kernels " + "with fused elementwise epilogues, do not set the `kernel_cc` " + "parameter when constructing the Gemm object.") + + self.epilogue_functor = epilogue.get_activation_epilogue( + activation, + datatypes.binding_type(self._element_c), + elements_per_access, + datatypes.binding_type(self._element_accumulator), + datatypes.binding_type(self._element_accumulator), + ) + + def _reset_epilogue_functor_alignment(self, alignment): + if self.epilogue_functor is None or not hasattr(self.epilogue_functor, 'activation_functor'): + activation = epilogue.identity + else: + activation = type(self.epilogue_functor.activation_functor) + + self.epilogue_functor = epilogue.get_activation_epilogue( + activation, + datatypes.binding_type(self._element_c), + alignment, + datatypes.binding_type(self._element_accumulator), + datatypes.binding_type(self._element_accumulator), + ) + + @property + def activation(self): + """ + Returns the type of the current activation function used + """ + return type(self.epilogue_functor.activation_functor) + + @activation.setter + def activation(self, act): + """ + Sets the type of the activation function to use + """ + self._reset_epilogue_functor_activation(act) + + @property + def opclass(self) -> cutlass.OpcodeClass: + """ + Returns the opcode class currently in use by the GEMM + + :return: opcode class currently in use + :rtype: cutlass.OpcodeClass + """ + return self.op_class + + @opclass.setter + def opclass(self, oc: cutlass.OpcodeClass): + """ + Sets the opcode class to use in the GEMM. If the opcode class is not supported under + the given compute capability and element/layout combinations of the GEMM, an exception is raised. + """ + if oc in self.possible_op_classes: + self.op_class = oc + else: + raise Exception( + f'Unsupported operation class {oc} for CC {self.cc} and data type combination ' + f'({self._element_a}, {self._element_b}, {self._element_accumulator}) and ' + f'layout combination ({self._layout_a}, {self._layout_b}).') + + # Changing the op class changes the elements per access in the epilogue. Reset this. + if self.op_class == cutlass.OpcodeClass.Simt: + elements_per_access = 1 + else: + elements_per_access = 128 // cutlass.DataTypeSize[self._element_c] + + if self.epilogue_functor is not None: + self._reset_epilogue_functor_alignment(elements_per_access) + + # Changing the op class also changes the possible operations available. Reset these. + self.possible_operations = self.options.operations( + self.op_class, self._element_a, self._element_b, + self._element_accumulator, self._layout_a, self._layout_b) + + @property + def swizzling_functor(self): + """ + Returns the type of the swizzling functor currently being used by the GEMM + + :return: swizzing functor type + """ + return self._swizzling_functor + + @swizzling_functor.setter + def swizzling_functor(self, swizzling_functor): + """ + Sets the swizzling functor to the type specified by `swizzling_functor` + """ + if swizzling_functor == swizzle.ThreadblockSwizzleStreamK: + if self.op_class == cutlass.OpcodeClass.Simt: + raise Exception('ThreadblockSwizzleStreamK is currently only supported with opcode class TensorOp') + + if self.current_cc == 90: + raise Exception('ThreadblockSwizzleStreamK is currently unsupported on SM90') + self._swizzling_functor = swizzling_functor + + def _valid_tile_description(self, td: TileDescription) -> tuple: + """ + Checks whether the provided tile description is valid for the given compute capability. 
At present, + this checks the following: + + - Does the tile description use a number of stages supported by the compute capability in question? + - Does the tile size requested fit within shared memory? + - Are the requested cluster dimensions valid for the given architecture (e.g., + pre-SM90 architectures support only unit cluster dimensions)? + - Is the kernel schedule being used supported on the architecture in question? + + :param td: tile description to validate + :type td: cutlass.backend.TileDescription + :return: tuple in which the first element is a bool indicating that the tile description is valid + and the second element is a string providing an optional error message. + :rtype: tuple + """ + # Check stage count based on the CC to which we are compiling (self.cc), rather + # than the CC from which we find kernels (self.current_cc) + valid, msg = check.valid_stage_count(self.cc, td) + if not valid: + return (valid, msg) + + valid, msg = check.valid_cluster_shape(self.current_cc, td.cluster_shape) + if not valid: + return (valid, msg) + + valid, msg = check.valid_kernel_schedule(self.current_cc, td.kernel_schedule) + return valid, msg + +
[docs] def tile_descriptions(self) -> list: + """ + Returns a list of valid tile descriptions for the operations + + :returns: list of valid tile descriptions for the operations + :rtype: list + """ + return [datatypes.td_from_profiler_op(op) for op in self.possible_operations.all_operations]
+ +
[docs] def construct( + self, tile_description: TileDescription = None, + alignment_A: int = None, alignment_B: int = None, alignment_C: int = None) -> GemmOperationUniversal: + """ + Constructs a ``cutlass.backend.GemmUniversalOperation`` based on the input parameters and current + kernel specification of the ``Gemm`` object. + + :param tile_description: tile description specifying shapes and operand types to use in the kernel + :type tile_description: cutlass.backend.TileDescription + :param alignment_A: alignment of operand A + :type alignment_A: int + :param alignment_B: alignment of operand B + :type alignment_B: int + :param alignment_C: alignment of operand C + :type alignment_C: int + + :return: operation that was constructed + :rtype: cutlass.backend.GemmOperationUniversal + """ + alignment_pref_A = min(128 // cutlass.DataTypeSize[self._element_a], max(self.possible_operations.alignments)) + alignment_pref_B = min(128 // cutlass.DataTypeSize[self._element_b], max(self.possible_operations.alignments)) + alignment_pref_C = min(128 // cutlass.DataTypeSize[self._element_c], max(self.possible_operations.alignments)) + alignment_A = check.alignment_or_default(alignment_A, alignment_pref_A) + alignment_B = check.alignment_or_default(alignment_B, alignment_pref_B) + alignment_C = check.alignment_or_default(alignment_C, alignment_pref_C) + + self._reset_epilogue_functor_alignment(alignment_C) + + tensor_A = TensorDescription( + datatypes.binding_type(self._element_a), + datatypes.binding_layout(self._layout_a), + alignment_A + ) + tensor_B = TensorDescription( + datatypes.binding_type(self._element_b), + datatypes.binding_layout(self._layout_b), + alignment_B + ) + tensor_C = TensorDescription( + datatypes.binding_type(self._element_c), + datatypes.binding_layout(self._layout_c), + alignment_C + ) + + if tile_description is None: + op = self.possible_operations.operations(alignment_A)[0] + tile_description = datatypes.td_from_profiler_op(op) + else: + valid, err_str = self._valid_tile_description(tile_description) + if not valid: + raise Exception(f"Invalid tile description. {err_str}") + self.tile_description = tile_description + + operation = GemmOperationUniversal( + arch=self.current_cc, + tile_description=tile_description, + A=tensor_A, B=tensor_B, C=tensor_C, + epilogue_functor=self.epilogue_functor, + swizzling_functor=self._swizzling_functor, + ) + + return operation
+ +
[docs] def compile(self, tile_description: TileDescription = None, + alignment_A: int = None, alignment_B: int = None, alignment_C: int = None, + print_module: bool = False) -> cutlass.backend.GemmOperationUniversal: + """ + Emits and compiles the kernel currently specified. If ``tile_description`` and any + of the ``alignment`` parameters are set, the kernel will be chosen using this + tile description and alignments. Otherwise, a default tile description and alignment + will be used. + + :param tile_description: tile description specifying shapes and operand types to use in the kernel + :type tile_description: cutlass.backend.TileDescription + :param alignment_A: alignment of operand A + :type alignment_A: int + :param alignment_B: alignment of operand B + :type alignment_B: int + :param alignment_C: alignment of operand C + :type alignment_C: int + :param print_module: whether to print the emitted C++ code + :type print_module: bool + + :return: operation that was compiled + :rtype: cutlass.backend.GemmOperationUniversal + """ + self.operation = self.construct(tile_description, alignment_A, alignment_B, alignment_C) + + if print_module: + print(self.operation.rt_module.emit()) + + compiler.add_module([self.operation,]) + return self.operation
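The ``tile_descriptions()``, ``construct()``, and ``compile()`` methods above can be combined to select a non-default kernel configuration. The following is a small, hedged sketch; it assumes a plan created as in the module docstring, and the index ``0`` is an arbitrary choice rather than a recommended configuration.

.. code-block:: python

    plan = cutlass.op.Gemm(element=cutlass.DataType.f32, layout=cutlass.LayoutType.RowMajor)

    # Inspect the tile descriptions valid for the current opcode class and pick one
    tds = plan.tile_descriptions()
    td = tds[0]

    # Emit and compile the kernel for that tile description, printing the generated C++
    plan.compile(td, print_module=True)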
+ + def _verify_type_and_layout(self, tensor, ref_type, ref_layout, name): + """ + Verifies that ``tensor`` has data type ``ref_type`` and layout ``ref_layout``. An exception + is raised if it does not. + + :param tensor: object representing a tensor passed in to verify, or ``None`` if no tensor was passed in + :type tensor: numpy/cupy/torch array/tensor object + :param ref_dtype: data type for the tensor that this object was initialized to + :param ref_layout: layout for the tensor that this object was initialized to + :param name: identifier of the tensor to verify. Used in raising exceptions + :type name: str + """ + dtype, layout = datatypes.get_datatype_and_layout(tensor) + if dtype != ref_type or layout != ref_layout: + raise Exception(f'Tensor {name} with type and layout ({dtype}, {layout}) ' + f'does not match the expected type and ' + f'layout of ({ref_type}, {ref_layout}).') + + def _verify_tensor(self, tensor, ref_tensor, ref_dtype, ref_layout, name): + """ + Verifies the following properties: + 1) Either ``tensor`` or ``ref_tensor`` must be set (i.e., not ``None``) + 2) If ``tensor`` is not ``None``, its datatype and layout must match matches the current versions + set by the plan (i.e., those in ``ref_dtype`` and ``ref_layout``) + + If either of these properties does not hold, an exception is raised. If these properties hold and + ``tensor`` is not ``None``, ``tensor`` is returned. Otherwise, ``ref_tensor`` is returned. + + :param tensor: object representing a tensor passed in to verify, or ``None`` if no tensor was passed in + :type tensor: numpy/cupy/torch array/tensor object + :param ref_tensor: object representing a tensor passed in on construction of this object, or ``None`` if no tensor was passed in + :type ref_tensor: numpy/cupy/torch array/tensor object + :param ref_dtype: data type for the tensor that this object was initialized to + :param ref_layout: layout for the tensor that this object was initialized to + :param name: identifier of the tensor to verify. Used in raising exceptions + :type name: str + + :return: valid tensor object to use + :rtype: numpy/cupy/torch array/tensor object + """ + if tensor is None: + if ref_tensor is None: + raise Exception(f"Tensor {name} must be set.") + return ref_tensor + + self._verify_type_and_layout(tensor, ref_dtype, ref_layout, name) + return tensor + + def _verify_scalar(self, scalar, ref_scalar, ref_dtype, name): + """ + Verifies the following properties: + 1) Either ``scalar`` or ``ref_scakar`` must be set (i.e., not ``None``) + 2) If ``scalar`` is not ``None``, its datatype must match matches the current version + set by the plan (i.e., those in ``ref_dtype``) + + If either of these properties does not hold, an exception is raised. If these properties hold and + ``scalar`` is not ``None``, ``scalar`` is returned. Otherwise, ``ref_scalar`` is returned. + + :param scalar: object representing a tensor passed in to verify, or ``None`` if no tensor was passed in + :type scalar: numpy/cupy/torch scalar + :param ref_scalar: object representing a tensor passed in on construction of this object, or ``None`` if no tensor was passed in + :type ref_scalar: numpy/cupy/torch scalar + :param ref_dtype: data type for the scalar that this object was initialized to + :param name: identifier of the scalar to verify. 
Used in raising exceptions + :type name: str + + :return: valid scalar to use + :rtype: numpy/cupy/torch scalar + """ + if scalar is None: + if ref_scalar is None: + raise Exception(f"Scalar {name} must be set.") + return ref_scalar + dtype = datatypes.library_type(scalar.dtype) + if dtype != ref_dtype: + raise Exception( + f"Tensor {name} with type {dtype} does not match expected type {ref_dtype}." + ) + return scalar + +
[docs] def run(self, A=None, B=None, C=None, D=None, + alpha=None, beta=None, batch_count: int = 1, + sync: bool = True, print_module: bool = False) -> GemmArguments: + """ + Runs the kernel currently specified. If it has not already been, the kernel is emitted and + compiled. Tensors holding operands and outputs of the kernel are sourced either from the + ``A``, ``B``, ``C``, ``D``, ``alpha``, and ``beta`` + parameters provided in this call, or from those + passed in on the construction of this object -- one of the two must be specified. + + By default, this call returns only once the kernel has completed. To launch the kernel + and immediately return, set ``sync=False``. In this case, it is the responsibility of the + caller to syncrhonize the results of the kernel before attempting to access outputs + by calling ``sync()`` on the arguments returned from this call. + + :param A: tensor representing data type and layout of operand A + :param B: tensor representing data type and layout of operand B + :param C: tensor representing data type and layout of operand C + :param D: tensor representing data type and layout of operand D + :param alpha: scalar paramter alpha from GEMM computation that scales the product of operands A and B + :param beta: scalar parameter beta from GEMM operation that scales operand C + :param batch_count: number of GEMMs in the batch + :type batch_count: int + :param sync: whether the call should wait for the kernel to complete before returning + :type sync: bool + :param print_module: whether to print the emitted C++ code + :type print_module: bool + + :return: arguments passed in to the kernel + :rtype: cutlass.backend.GemmArguments + """ + if batch_count < 1: + raise Exception(f"Invalid batch count {batch_count}. Value must be an integer >= 1.") + + A = self._verify_tensor(A, self.A, self._element_a, self._layout_a, "A") + B = self._verify_tensor(B, self.B, self._element_b, self._layout_b, "B") + C = self._verify_tensor(C, self.C, self._element_c, self._layout_c, "C") + D = self._verify_tensor(D, self.D, self._element_d, self._layout_d, "D") + alpha = self._verify_scalar(alpha, self.alpha, self._element_c, "alpha") + beta = self._verify_scalar(beta, self.beta, self._element_c, "beta") + + alignment_a = self.possible_operations.find_alignment(A.shape, self._layout_a) + alignment_b = self.possible_operations.find_alignment(B.shape, self._layout_b) + alignment_c = self.possible_operations.find_alignment(C.shape, self._layout_c) + self.compile(self.tile_description, alignment_A=alignment_a, alignment_B=alignment_b, + alignment_C=alignment_c, print_module=print_module) + + problem_size = cutlass_bindings.gemm.GemmCoord(A.shape[0], B.shape[1], A.shape[1]) + + if batch_count == 1: + mode = cutlass_bindings.gemm.Mode.Gemm + kwargs = {'split_k_slices': 1} + else: + mode = cutlass_bindings.gemm.Mode.Batched + kwargs = {'batch': batch_count} + + arguments = GemmArguments( + operation=self.operation, problem_size=problem_size, + A=A, B=B, C=C, D=D, + output_op=self.operation.epilogue_type(alpha, beta), + gemm_mode=mode, + **kwargs + ) + + self.operation.run(arguments) + + if sync: + arguments.sync() + + return arguments
+
+
+
+ +
+ +
+
\ No newline at end of file
diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_modules/cutlass/op/gemm_grouped.html b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_modules/cutlass/op/gemm_grouped.html
new file mode 100644
index 0000000000000000000000000000000000000000..b227ab693463a3b51fee405a0f5c354552e54340
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_modules/cutlass/op/gemm_grouped.html
@@ -0,0 +1,554 @@
+cutlass.op.gemm_grouped - CUTLASS Python

Source code for cutlass.op.gemm_grouped

+#################################################################################################
+#
+# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+#################################################################################################
+
+"""
+    Ease-of-use interface for constructing, compiling, and running GEMMs.
+
+    The ``GroupedGemm`` interface is meant to allow one to easily instantiate, compile, and run
+    grouped GEMM operations in CUTLASS via Python, without specifying many configuration parameters.
+    Under the hood, the interface will select sensible default parameters for the many template
+    parameters for CUTLASS grouped GEMMs.
+
+    Note: optimal performance is not to be expected from this interface. To achieve optimal
+    performance, one should specify and tune each configuration parameter.
+
+    The simplest example of using this interface is the following:
+
+    .. highlight:: python
+    .. code-block:: python
+
+        # As, Bs, Cs, and Ds are torch/numpy/cupy tensor objects
+        plan = cutlass.op.GroupedGemm(element=cutlass.DataType.f16, layout=cutlass.LayoutType.RowMajor)
+        plan.run([A0, A1], [B0, B1], [C0, C1], [D0, D1])
+"""
+
+import cutlass_bindings
+
+from cutlass.backend.gemm_operation import (
+    GemmGroupedArguments,
+    GemmOperationGrouped,
+)
+from cutlass.backend.library import (
+    DataTypeSize,
+    SchedulerMode,
+    TensorDescription,
+    TileDescription,
+)
+from cutlass.op.gemm import Gemm
+from cutlass.utils import check, datatypes
+
+
+
[docs]class GroupedGemm(Gemm): + """ + Constructs a ``GroupedGemm`` object. + + The data types and layouts of operands A, B, and C, along with the data type of output D + and that used for accumulation, are bound to the ``GroupedGemm`` object throughout its lifetime -- + these are not to be changed after a ``GroupedGemm`` has been constructed. + + The constructor has optional parameters for flexibly setting these parameters. Please see the constructor + for ``Gemm`` for examples of these. + + :param cc: compute capability of device to generate kernels for + :type cc: int + :param A: tensor representing data type and layout of operands A + :param B: tensor representing data type and layout of operands B + :param C: tensor representing data type and layout of operands C + :param D: tensor representing data type and layout of operands D + :param alpha: scalar paramter alpha from GEMM computation that scales the product of operands A and B + :param beta: scalar parameter beta from GEMM operation that scales operand C + :param element_accumulator: data type to be used in accumulation of the product of operands A and B + :type element_accumulator: cutlass.DataType + :param element: generic data type to be used for operands A, B, C, D, as well as the accumulation data type + :type element: cutlass.DataType + :param layout: generic layout type to be used for operands A, B, C, and D + :type layout: cutlass.LayoutType + :param element_A: data type to be used for operand A + :type element_A: cutlass.DataType + :param element_B: data type to be used for operand B + :type element_B: cutlass.DataType + :param element_C: data type to be used for operand C + :type element_C: cutlass.DataType + :param element_D: data type to be used for operand D + :type element_D: cutlass.DataType + :type layout_A: layout of operand A + :param layout_A: cutlass.LayoutType + :type layout_B: layout of operand B + :param layout_B: cutlass.LayoutType + :type layout_C: layout of operand C + :param layout_C: cutlass.LayoutType + :type layout_D: layout of operand D + :param layout_D: cutlass.LayoutType + """ + + def __init__( + self, A=None, B=None, C=None, D=None, + alpha=1.0, beta=0.0, element_accumulator=None, + element=None, layout=None, + element_A=None, element_B=None, element_C=None, element_D=None, + layout_A=None, layout_B=None, layout_C=None, + cc: int = None, + ): + super().__init__( + A=A, B=B, C=C, D=D, + alpha=alpha, beta=beta, + element_accumulator=element_accumulator, + element=element, layout=layout, + element_A=element_A, element_B=element_B, + element_C=element_C, element_D=element_D, + layout_A=layout_A, layout_B=layout_B, layout_C=layout_C, + cc=cc + ) + + # Grouped GEMM specializations for SM90 are currently unavailable. Revert to using SM80 + if self.current_cc == 90: + self._reset_options(80) + self._reset_operations(reset_epilogue=False) + + self.name = "grouped_gemm" + + @Gemm.swizzling_functor.setter + def swizzling_functor(self, swizzling_functor): + """ + Sets the swizzling functor to the type specified by `swizzling_functor` + """ + raise Exception('Grouped GEMM does not currently support different swizzling functors') + +
[docs] def construct(self, tile_description: TileDescription = None, + alignment_A: int = None, + alignment_B: int = None, + alignment_C: int = None) -> GemmOperationGrouped: + """ + Constructs a ``cutlass.backend.GemmOperationGrouped`` based on the input parameters and current + kernel specification of the ``Gemm`` object. + + :param tile_description: tile description specifying shapes and operand types to use in the kernel + :type tile_description: cutlass.backend.TileDescription + :param alignment_A: alignment of operand A + :type alignment_A: int + :param alignment_B: alignment of operand B + :type alignment_B: int + :param alignment_C: alignment of operand C + :type alignment_C: int + + :return: operation that was constructed + :rtype: cutlass.backend.GemmOperationGrouped + """ + alignment_preference = max(self.possible_operations.alignments) + alignment_A = check.alignment_or_default(alignment_A, alignment_preference) + alignment_B = check.alignment_or_default(alignment_B, alignment_preference) + alignment_C = check.alignment_or_default(alignment_C, alignment_preference) + + self._reset_epilogue_functor_alignment(alignment_C) + + tensor_A = TensorDescription( + datatypes.binding_type(self._element_a), + datatypes.binding_layout(self._layout_a), + alignment_A + ) + tensor_B = TensorDescription( + datatypes.binding_type(self._element_b), + datatypes.binding_layout(self._layout_b), + alignment_B + ) + tensor_C = TensorDescription( + datatypes.binding_type(self._element_c), + datatypes.binding_layout(self._layout_c), + alignment_C + ) + + if tile_description is None: + op = self.possible_operations.operations(alignment_A)[0] + tile_description = datatypes.td_from_profiler_op(op) + else: + valid, err_str = self._valid_tile_description(tile_description) + if not valid: + raise Exception(f"Invalid tile description. {err_str}") + self.tile_description = tile_description + + operation = GemmOperationGrouped( + arch=self.current_cc, + tile_description=tile_description, + A=tensor_A, B=tensor_B, C=tensor_C, + epilogue_functor=self.epilogue_functor, + swizzling_functor=self._swizzling_functor, + precompute_mode=SchedulerMode.Device) + + return operation
+ +
[docs] def run(self, A, B, C, D, + alpha=None, beta=None, sync: bool = True, + print_module: bool = False) -> GemmGroupedArguments: + """ + Runs the kernel currently specified. + + By default, this call returns only once the kernel has completed. To launch the kernel + and immediately return, set ``sync=False``. In this case, it is the responsibility of the + caller to syncrhonize the results of the kernel before attempting to access outputs + by calling ``sync()`` on the arguments returned from this call. + + :param A: list of tensors representing data type and layout of operand A + :type A: list + :param B: list of tensors representing data type and layout of operand B + :type B: list + :param C: list of tensors representing data type and layout of operand C + :type C: list + :param D: list of tensors representing data type and layout of operand D + :type D: list + :param alpha: scalar paramter alpha from GEMM computation that scales the product of operands A and B + :param beta: scalar parameter beta from GEMM operation that scales operand C + :param sync: whether the call should wait for the kernel to complete before returning + :type sync: bool + :param print_module: whether to print the emitted C++ code + :type print_module: bool + + :return: arguments passed in to the kernel + :rtype: cutlass.backend.GemmGroupedArguments + """ + if len(A) != len(B) or len(A) != len(C) or len(A) != len(D): + raise Exception("Lengths of A, B, C, and D lists must be equal") + + problem_sizes = [] + As, Bs, Cs, Ds = ([None] * len(A) for _ in range(4)) + for i in range(len(A)): + As[i] = self._verify_tensor(A[i], self.A, self._element_a, self._layout_a, "A") + Bs[i] = self._verify_tensor(B[i], self.B, self._element_b, self._layout_b, "B") + Cs[i] = self._verify_tensor(C[i], self.C, self._element_c, self._layout_c, "C") + Ds[i] = self._verify_tensor(D[i], self.D, self._element_d, self._layout_d, "D") + problem_sizes.append(cutlass_bindings.gemm.GemmCoord(A[i].shape[0], B[i].shape[1], A[i].shape[1])) + + alpha = self._verify_scalar(alpha, self.alpha, self._element_c, "alpha") + beta = self._verify_scalar(beta, self.beta, self._element_c, "beta") + + alignment_a = min((self.possible_operations.find_alignment(A.shape, self._layout_a) for A in As)) + alignment_b = min((self.possible_operations.find_alignment(B.shape, self._layout_b) for B in Bs)) + alignment_c = min((self.possible_operations.find_alignment(C.shape, self._layout_c) for C in Cs)) + self.compile(self.tile_description, alignment_A=alignment_a, alignment_B=alignment_b, + alignment_C=alignment_c, print_module=print_module) + + arguments = GemmGroupedArguments( + operation=self.operation, + problem_sizes=problem_sizes, + A=As, B=Bs, C=Cs, D=Ds, + output_op=self.operation.epilogue_type(alpha, beta) + ) + + self.operation.run(arguments) + + if sync: + arguments.sync() + + return arguments
+
+
+
+ +
+ +
+
\ No newline at end of file
diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_modules/cutlass/op/op.html b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_modules/cutlass/op/op.html
new file mode 100644
index 0000000000000000000000000000000000000000..e13244b9c176fcc5e20e4439cb7d2a84d03cc5ca
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_modules/cutlass/op/op.html
@@ -0,0 +1,400 @@
+cutlass.op.op - CUTLASS Python

Source code for cutlass.op.op

+#################################################################################################
+#
+# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+#################################################################################################
+
+"""
+Base operation used for defining high-level CUTLASS operations (e.g., GEMM, Conv2d)
+"""
+
+from bisect import bisect_left
+
+from cutlass import option_registry
+from cutlass.backend.utils.device import device_cc
+from cutlass.epilogue import get_activations
+from cutlass.library_defaults import _generator_ccs
+from cutlass.swizzle import get_swizzling_functors
+
+
+
[docs]class OperationBase: + """ + Base operation used for defining high-level CUTLASS operations (e.g., GEMM, Conv2d) + """ + + def __init__(self, cc: int = None, kernel_cc: int = None): + """ + :param cc: compute capability of device for which kernels should be compiled. For example, if running on H100, this should be set to 90 + :type cc: int + :param kernel_cc: compute capability of kernels to generate. For example, if running on SM90, but desiring to use a CUTLASS 2.x-style Ampere kernel, this should be set to 80 + :type kernel_cc: int + """ + self.cc = cc if cc is not None else device_cc() + self.specified_kernel_cc = kernel_cc is not None + self.current_cc = kernel_cc if kernel_cc is not None else self._find_closest_cc(self.cc) + self.tile_description = None + + self.options = option_registry.options_for_cc(self.current_cc) + + if self.options is None: + raise Exception(f"Invalid or unsupported compute capability: {self.current_cc}") + + def _find_closest_cc(self, cc: int) -> int: + """ + Returns the closest CC in _generator_ccs less than or equal to `cc` + + :param cc: compute capability to query + :type cc: int + + :returns: closest CC in _generator_ccs less than or equal to `cc` + :rtype: int + """ + if cc in _generator_ccs: + return cc + + # Find closest CC lower than this CC + idx = bisect_left(_generator_ccs, cc) + if idx == 0: + raise Exception(f'No valid CC to fall back to for {cc}') + return _generator_ccs[idx-1] + +
[docs] def activations(self) -> list: + """ + Returns possible activation functions that can be used + + :return: list of activation functions that can be used + :rtype: list + """ + return get_activations()
+ +
[docs] def swizzling_functors(self) -> list: + """ + Returns possible swizzling functions that can be used + + :return: list of swizzling functions that can be used + :rtype: list + """ + return get_swizzling_functors()
+ + def _reset_options(self, cc: int): + """ + Resets the kernel options based on cc + + :param cc: compute capability to reset to + :type cc: int + """ + if cc != self.current_cc: + if cc not in _generator_ccs: + raise Exception(f'Invalid CC for CUTLASS kernels: {cc}.') + self.current_cc = cc + self.options = option_registry.options_for_cc(self.current_cc)
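The fallback performed by ``_find_closest_cc`` above is just a bisection over the sorted list of generator CCs. The following standalone sketch repeats that logic; the list contents are an assumption used purely for illustration, and the real values come from ``cutlass.library_defaults._generator_ccs``.

.. code-block:: python

    from bisect import bisect_left

    generator_ccs = [50, 60, 61, 70, 75, 80, 90]   # assumed contents, for illustration only

    def closest_cc(cc: int) -> int:
        if cc in generator_ccs:
            return cc
        idx = bisect_left(generator_ccs, cc)
        if idx == 0:
            raise Exception(f"No valid CC to fall back to for {cc}")
        return generator_ccs[idx - 1]

    print(closest_cc(86))   # -> 80: an SM86 device falls back to SM80-tagged kernels
    print(closest_cc(90))   # -> 90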
+
+
+
+ +
+ +
+
\ No newline at end of file
diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_modules/cutlass/swizzle.html b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_modules/cutlass/swizzle.html
new file mode 100644
index 0000000000000000000000000000000000000000..ba581cbde9e805e5b1772c3213312171c87be7df
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_modules/cutlass/swizzle.html
@@ -0,0 +1,350 @@
+cutlass.swizzle - CUTLASS Python

Source code for cutlass.swizzle

+#################################################################################################
+#
+# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+#################################################################################################
+
+"""
+Registry of swizzling functions
+"""
+
+import cutlass_bindings
+
+IdentitySwizzle1 = cutlass_bindings.IdentitySwizzle1
+IdentitySwizzle2 = cutlass_bindings.IdentitySwizzle2
+IdentitySwizzle4 = cutlass_bindings.IdentitySwizzle4
+IdentitySwizzle8 = cutlass_bindings.IdentitySwizzle8
+HorizontalSwizzle = cutlass_bindings.HorizontalSwizzle
+BatchedIdentitySwizzle = cutlass_bindings.BatchedIdentitySwizzle
+ThreadblockSwizzleStreamK = cutlass_bindings.ThreadblockSwizzleStreamK
+StridedDgradIdentitySwizzle1 = cutlass_bindings.StridedDgradIdentitySwizzle1
+StridedDgradIdentitySwizzle4 = cutlass_bindings.StridedDgradIdentitySwizzle4
+StridedDgradHorizontalSwizzle = cutlass_bindings.StridedDgradHorizontalSwizzle
+
+
+_swizzling_functors = [
+    IdentitySwizzle1,
+    IdentitySwizzle2,
+    IdentitySwizzle4,
+    IdentitySwizzle8,
+    HorizontalSwizzle,
+    BatchedIdentitySwizzle,
+    ThreadblockSwizzleStreamK,
+    StridedDgradIdentitySwizzle1,
+    StridedDgradIdentitySwizzle4,
+    StridedDgradHorizontalSwizzle,
+]
+
+
+
[docs]def get_swizzling_functors(): + return _swizzling_functors
+
+
+
+ +
+ +
+
\ No newline at end of file
diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_modules/cutlass/utils/check.html b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_modules/cutlass/utils/check.html
new file mode 100644
index 0000000000000000000000000000000000000000..d4c3c57596e1af2e403653a6612ed44977f95375
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_modules/cutlass/utils/check.html
@@ -0,0 +1,476 @@
+cutlass.utils.check - CUTLASS Python

Source code for cutlass.utils.check

+#################################################################################################
+#
+# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+#################################################################################################
+
+"""
+Utility functions for checking constraints on kernels and calculating kernel attributes
+"""
+
+import ctypes
+
+import cutlass_bindings
+import cutlass
+from cutlass.backend.library import DataTypeSize, TileDescription
+
+
+
[docs]def calculate_smem_usage_per_stage(tile_description, operation_kind): + """ + Returns the amount of shared memory in bytes consumed in a single stage of a kernel. + + :return: number of bytes of shared memory consumed by a single stage + :rtype: int + """ + m, n, k = tile_description.threadblock_shape + + if operation_kind == cutlass.OperationKind.Gemm: + stage_barrier_bytes = 32 + return ( + (DataTypeSize[tile_description.math_instruction.element_a] * m * k // 8) + + (DataTypeSize[tile_description.math_instruction.element_b] * k * n // 8) + + stage_barrier_bytes + ) + else: + raise Exception(f"No available shared memory calculation for operation kind {operation_kind}")
+ + +
[docs]def calculate_smem_usage(operation): + """ + Returns the amount of shared memory in bytes consumed by a kernel. + + :return: number of bytes of shared memory consumed by the operation + :rtype: int + """ + _per_stage = calculate_smem_usage_per_stage(operation.tile_description, operation.operation_kind) + return _per_stage * operation.tile_description.stages
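A worked example of the arithmetic above (illustrative only; it mirrors the per-stage formula rather than constructing a real `TileDescription`), using a 128x128x32 threadblock tile with 16-bit A and B operands:

    # Shared memory per stage for a 128x128x32 tile with fp16 A and B operands.
    m, n, k = 128, 128, 32
    bits_a = bits_b = 16           # DataTypeSize[cutlass.DataType.f16]
    stage_barrier_bytes = 32

    smem_per_stage = (bits_a * m * k // 8) + (bits_b * k * n // 8) + stage_barrier_bytes
    print(smem_per_stage)          # 16416 bytes per stage
    print(smem_per_stage * 3)      # 49248 bytes total for a 3-stage kernel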
+ + +
[docs]def valid_stage_count(cc: int, td: TileDescription) -> tuple: + """ + Checks whether a device with `cc` supports the number of stages within `td`, both + based on raw limits on the number of stages and based on shared memory capacity. + + :param cc: compute capability of device in question + :type cc: int + :param td: tile description to check + :type td: TileDescription + + :return: tuple with the first element indicating whether the provided tile description is + valid for the provided device and the second element being an error message + :rtype: tuple + """ + if cc == 90 and (td.stages is None or td.stages == 0): + # Stage count of None or 0 for SM90 indicates that the CollectiveBuilder automatically + # determines the stage count to use. Thus, all settings are valid in these scenarios. + return (True, "") + + if td.stages <= 0: + return (False, f"Stage counts must be positive integers. Tile description has stage count of {td.stages}.") + + if cc < 80 and td.stages != 2: + return (False, f"Tile description has stage count of {td.stages}, " + f"but only 2 stages are supported on SM{cc}.") + + smem_per_stage = calculate_smem_usage_per_stage(td, cutlass.OperationKind.Gemm) + smem_arch = cutlass.SharedMemPerCC[cc] << 10 + if (smem_per_stage * td.stages) > smem_arch: + return (False, + "Configuration uses too much shared memory. Consider reducing stage count or tile shape.\n" + f"Details: configuration uses {smem_per_stage} bytes of shared memory per stage, and " + f"{td.stages} stages for a total of {smem_per_stage * td.stages} bytes.\n" + f"The maximum amount of shared memory that can be used per block on CC {cc} is {smem_arch}.") + + return (True, "")
+ + +
[docs]def valid_cluster_shape(cc: int, cluster_shape: list) -> tuple: + """ + Checks whether a device with `cc` supports a thread block cluster of shape `cluster_shape`. + + :param cc: compute capability of device in question + :type cc: int + :param cluster_shape: dimensions of thread block cluster shape to check + :type cluster_shape: list + + :return: tuple with the first element indicating whether the provided cluster shape is + valid for the provided device and the second element being an error message + :rtype: tuple + """ + + if cc < 90: + if cluster_shape != [1, 1, 1]: + return (False, + f"Cluster shape for pre-SM90 architectures must be [1, 1, 1]. Received cluster shape of " + f"{cluster_shape} for SM{cc}.") + else: + return (True, "") + + if len(cluster_shape) != 3: + return (False, + f"Cluster shapes must be rank-3. Received {cluster_shape} (rank {len(cluster_shape)}).") + + if cluster_shape[2] != 1: + return (False, + "CUTLASS kernels currently require the third dimension of cluster shape to be 1. " + f"Received cluster shape of {cluster_shape}.") + + # The CUDA programming guide currently defines a maximum of 8 thread blocks per cluster + # as being portably supported (https://docs.nvidia.com/cuda/cuda-c-programming-guide/#thread-block-clusters). + # Current CUTLASS kernels only have non-unit cluster dimensions within the first two dimensions, + # so we check that the first two dimensions of the cluster shape do not exceed 8 thread blocks in total. + blocks_in_2d = cluster_shape[0] * cluster_shape[1] + if blocks_in_2d > 8: + return (False, + f"Thread block clusters with more than 8 thread blocks are currently unsupported on SM{cc}. " + f"Received cluster shape {cluster_shape}, which has {blocks_in_2d} thread blocks.") + return (True, "")
+ + +
[docs]def valid_kernel_schedule(cc: int, kernel_schedule: cutlass.KernelScheduleType) -> tuple: + """ + Checks whether a device with ``cc`` supports ``kernel_schedule``. + + :param cc: compute capability of device in question + :type cc: int + :param kernel_schedule: kernel schedule type + :type kernel_schedule: cutlass.KernelScheduleType + + :return: tuple with the first element indicating whether the provided kernel schedule is + valid for the provided device and the second element being an error message + :rtype: tuple + """ + if kernel_schedule != cutlass.KernelScheduleType.ScheduleAuto and cc < 90: + return (False, "Non-default kernel schedules are only supported on SM90 and beyond") + return (True, "")
+ + +
[docs]def alignment_or_default(alignment_provided: int, default_alignment: int) -> int: + """ + Returns `alignment_provided` if it is set, otherwise `default_alignment` and checks + that `alignment_provided` does not exceed `default_alignment`. + + :param alignment_provided: alignment preference specified. Can be None. + :type alignment_provided: int + :param default_alignment: alignment to use if `alignment_provided` is None + :type default_alignment: int + + :return: alignment to use + :rtype: int + """ + if alignment_provided is not None: + if alignment_provided > default_alignment: + raise Exception(f"Alignment {alignment_provided} exceeds the maximum supported of {default_alignment}.") + return alignment_provided + + return default_alignment
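A hypothetical sketch of how the checks above might compose, assuming the functions are in scope (e.g. imported from `cutlass.utils.check`); `valid_stage_count` is omitted because it needs a fully populated `TileDescription`:

    # Validate a candidate configuration for an SM90 device and fail fast on errors.
    cc = 90

    ok, msg = valid_cluster_shape(cc, [2, 1, 1])
    if not ok:
        raise Exception(msg)

    ok, msg = valid_kernel_schedule(cc, cutlass.KernelScheduleType.ScheduleAuto)
    if not ok:
        raise Exception(msg)

    # Fall back to the kernel's default alignment when no preference is given.
    alignment_a = alignment_or_default(None, 8)   # returns 8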
+
+
+
+ +
+ +
+
+
\ No newline at end of file
diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_modules/cutlass/utils/datatypes.html b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_modules/cutlass/utils/datatypes.html
new file mode 100644
index 0000000000000000000000000000000000000000..3046047577aca9911c85b5d2f43f737562d16661
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_modules/cutlass/utils/datatypes.html
@@ -0,0 +1,623 @@
[Sphinx page chrome for "cutlass.utils.datatypes - CUTLASS Python": contents/menu toggles and light/dark/auto theme controls]
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for cutlass.utils.datatypes

+#################################################################################################
+#
+# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+#################################################################################################
+
+"""
+Utility functions for converting between frontend datatypes and CUTLASS datatypes
+"""
+
+import cutlass_bindings
+
+import cutlass
+from cutlass.backend.library import (
+    DataTypeSize,
+    MathInstruction,
+    MathOperation,
+    ShortLayoutTypeNames,
+    TileDescription,
+)
+
+try:
+    import numpy as np
+
+    numpy_available = True
+    _library_to_numpy_dict = {
+        cutlass.DataType.f16: np.float16,
+        cutlass.DataType.f32: np.float32,
+        cutlass.DataType.f64: np.float64,
+        cutlass.DataType.s8: np.int8,
+        cutlass.DataType.s32: np.int32,
+    }
+except ImportError:
+    numpy_available = False
+    _library_to_numpy_dict = {}
+
+
+
[docs]def numpy_library_type(inp) -> cutlass.DataType: + if numpy_available: + if inp == np.float16: + return cutlass.DataType.f16 + elif inp == np.float32: + return cutlass.DataType.f32 + elif inp == np.float64: + return cutlass.DataType.f64 + elif inp == np.int8: + return cutlass.DataType.s8 + elif inp == np.int32: + return cutlass.DataType.s32 + return None
+ + +
[docs]def numpy_type(inp): + return _library_to_numpy_dict.get(inp, None)
+ + +try: + import cupy as cp + + cupy_available = True + _library_to_cupy_dict = { + cutlass.DataType.f16: cp.float16, + cutlass.DataType.f32: cp.float32, + cutlass.DataType.f64: cp.float64, + cutlass.DataType.s8: cp.int8, + cutlass.DataType.s32: cp.int32, + } +except ImportError: + cupy_available = False + _library_to_cupy_dict = {} + + +
[docs]def cupy_library_type(inp) -> cutlass.DataType: + if cupy_available: + if inp == cp.float16: + return cutlass.DataType.f16 + elif inp == cp.float32: + return cutlass.DataType.f32 + elif inp == cp.float64: + return cutlass.DataType.f64 + return None
+ + +
[docs]def cupy_type(inp): + return _library_to_cupy_dict.get(inp, None)
+ + +try: + import torch + + torch_available = True + _torch_to_library_dict = { + torch.half: cutlass.DataType.f16, + torch.float16: cutlass.DataType.f16, + torch.float: cutlass.DataType.f32, + torch.float32: cutlass.DataType.f32, + torch.double: cutlass.DataType.f64, + torch.float64: cutlass.DataType.f64, + } + + _library_to_torch_dict = { + cutlass.DataType.f16: torch.half, + cutlass.DataType.f16: torch.float16, + cutlass.DataType.f32: torch.float, + cutlass.DataType.f32: torch.float32, + cutlass.DataType.f64: torch.double, + cutlass.DataType.f64: torch.float64, + } +except ImportError: + torch_available = False + _torch_to_library_dict = {} + _library_to_torch_dict = {} + + +
[docs]def torch_library_type(inp) -> cutlass.DataType: + return _torch_to_library_dict.get(inp, None)
+ + +
[docs]def torch_type(inp): + return _library_to_torch_dict.get(inp, None)
+ + +try: + import bfloat16 + + bfloat16_available = True +except ImportError: + bfloat16_available = False + + +
[docs]def bfloat16_library_type(inp) -> cutlass.DataType: + if bfloat16_available: + if inp == bfloat16.bfloat16: + return cutlass.DataType.bf16
+ + +
[docs]def bfloat16_type(inp) -> bfloat16.bfloat16: + if bfloat16_available: + if inp == cutlass.DataType.bf16: + return bfloat16.bfloat16
+ + +# Mapping from library data type to Python-bound CUTLASS data type +library_to_binding_dict = { + cutlass.DataType.s8: cutlass_bindings.int8, + cutlass.DataType.s32: cutlass_bindings.int32, + cutlass.DataType.f16: cutlass_bindings.float16, + cutlass.DataType.bf16: cutlass_bindings.bfloat16, + cutlass.DataType.f32: cutlass_bindings.float32, + cutlass.DataType.f64: cutlass_bindings.float64, + cutlass.DataType.tf32: cutlass_bindings.tfloat32, +} + +# Mapping from Python-bound CUTLASS data type to library data type +binding_to_library = { + cutlass_bindings.int8: cutlass.DataType.s8, + cutlass_bindings.int32: cutlass.DataType.s32, + cutlass_bindings.float16: cutlass.DataType.f16, + cutlass_bindings.bfloat16: cutlass.DataType.bf16, + cutlass_bindings.float32: cutlass.DataType.f32, + cutlass_bindings.float64: cutlass.DataType.f64, + cutlass_bindings.tfloat32: cutlass.DataType.tf32, +} + + +
[docs]def binding_library_type(inp): + if inp in binding_to_library: + return binding_to_library[inp] + return None
+ + +
[docs]def has_binding_type(inp: cutlass.DataType): + return inp in library_to_binding_dict
+ + +
[docs]def library_to_binding(inp: cutlass.DataType): + if not has_binding_type(inp): + raise Exception(f"No available conversion from library type {inp} to Python-bound CUTLASS type") + return library_to_binding_dict[inp]
+ + +
[docs]def library_type(inp): + if inp in cutlass.DataTypeSize.keys(): + return inp + + for cvt_fn in [ + bfloat16_library_type, + cupy_library_type, + numpy_library_type, + torch_library_type, + binding_library_type, + ]: + out = cvt_fn(inp) + if out is not None: + return out + + raise Exception(f"No available conversion from type {inp} to a library type.")
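An illustrative round trip through the conversion helpers above (assumes NumPy is installed and that `library_type` and `library_to_binding` are in scope, e.g. from `cutlass.utils.datatypes`):

    import numpy as np

    # Normalize a frontend dtype to a library DataType, then to the bound CUTLASS type.
    lib_dtype = library_type(np.float16)          # cutlass.DataType.f16
    bound_dtype = library_to_binding(lib_dtype)   # cutlass_bindings.float16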
+ + +
[docs]def library_layout(layout): + if layout in cutlass.LayoutTag.keys(): + return layout + + # Convert Python-bound CUTLASS layout to profiler library layout + if layout == cutlass_bindings.RowMajor: + return cutlass.LayoutType.RowMajor + elif layout == cutlass_bindings.ColumnMajor: + return cutlass.LayoutType.ColumnMajor + else: + raise Exception(f"No conversion available for layout {layout} to library layout.")
+ + +
[docs]def binding_type(inp): + if inp in DataTypeSize.keys(): + return inp + + libtype = library_type(inp) + return library_to_binding(libtype)
+ + +
[docs]def binding_layout(layout): + if layout in ShortLayoutTypeNames.keys(): + return layout + elif layout == cutlass.LayoutType.RowMajor: + return cutlass_bindings.RowMajor + elif layout == cutlass.LayoutType.ColumnMajor: + return cutlass_bindings.ColumnMajor + else: + raise Exception(f"No conversion available for layout {layout} to Python-bound CUTLASS layout.")
+ + +def _tensor_from_numpy(np_tensor): + dtype = library_type(np_tensor.dtype) + if np_tensor.flags.c_contiguous: + layout = cutlass.LayoutType.RowMajor + elif np_tensor.flags.f_contiguous: + layout = cutlass.LayoutType.ColumnMajor + return (dtype, layout) + + +def _tensor_from_torch(pt_tensor): + dtype = library_type(pt_tensor.dtype) + return (dtype, cutlass.LayoutType.RowMajor) + + +
[docs]def get_datatype_and_layout(tensor): + if (numpy_available and isinstance(tensor, np.ndarray)) or ( + cupy_available and isinstance(tensor, cp.ndarray) + ): + return _tensor_from_numpy(tensor) + elif torch_available and isinstance(tensor, torch.Tensor): + return _tensor_from_torch(tensor) + else: + raise Exception(f"Unable to convert tensor of type {type(tensor)} to Python-bound CUTLASS datatype and layout.")
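A small sketch of inferring datatype and layout from a framework tensor (hypothetical values; assumes NumPy is installed):

    import numpy as np

    A = np.zeros((128, 64), dtype=np.float32)     # C-contiguous, i.e. row-major
    dtype, layout = get_datatype_and_layout(A)
    # dtype is cutlass.DataType.f32 and layout is cutlass.LayoutType.RowMajor.
    # A Fortran-ordered array (order="F") would map to LayoutType.ColumnMajor instead.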
+ + +
[docs]def binding_opclass(opclass: cutlass.OpcodeClass): + if opclass == cutlass.OpcodeClass.TensorOp: + return cutlass_bindings.OpClass.TensorOp + elif opclass == cutlass.OpcodeClass.Simt: + return cutlass_bindings.OpClass.Simt + else: + raise Exception(f"Unable to convert opcode class of type {opclass} to Python-bound CUTLASS opcode class.")
+ + +_math_operation_value_map = {x.value: x for x in MathOperation} + + +
[docs]def backend_math_operation(math_op: cutlass.MathOperation): + if math_op.value not in _math_operation_value_map.keys(): + raise Exception(f"Unable to convert math operation of type {math_op} to backend math operation.") + return _math_operation_value_map[math_op.value]
+ + +
[docs]def construct_backend_td(td: cutlass.TileDescription, + kernel_schedule: cutlass.KernelScheduleType) -> TileDescription: + mi = td.math_instruction + backend_mi = MathInstruction( + mi.instruction_shape, + binding_type(mi.element_a), + binding_type(mi.element_b), + binding_type(mi.element_accumulator), + binding_opclass(mi.opcode_class), + backend_math_operation(mi.math_operation) + ) + return TileDescription(td.threadblock_shape, td.stages, td.warp_count, + backend_mi, td.cluster_shape, kernel_schedule)
+ + +
[docs]def td_from_profiler_op(op) -> TileDescription: + """ + Converts the profiler's TileDescription in ``op`` into the backend TileDescription + + :param op: profiler Operation + + :returns: backend TileDescription + :rtype: cutlass.backend.TileDescription + """ + schedule = op.kernel_schedule if hasattr(op, 'kernel_schedule') else None + return construct_backend_td(op.tile_description, schedule)
+ + +
[docs]def td_from_profiler_td(td: cutlass.backend.TileDescription) -> TileDescription: + """ + Converts the profiler's TileDescription into the backend TileDescription + + :param td: profiler TileDescription + :type td: cutlass.TileDescription + + :returns: backend TileDescription + :rtype: cutlass.backend.TileDescription + """ + return construct_backend_td(td, kernel_schedule=None)
+
+
+
+ +
+ +
+
+
\ No newline at end of file
diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_modules/index.html b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_modules/index.html
new file mode 100644
index 0000000000000000000000000000000000000000..7cfe6e9db5139ee972c4cbe40569b787c6f29566
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_modules/index.html
@@ -0,0 +1,293 @@
[Sphinx page chrome for "Overview: module code - CUTLASS Python": contents/menu toggles and light/dark/auto theme controls]
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+ +
+ +
+ +
+
+ + + + + + + + + \ No newline at end of file diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_sources/contribute.md.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_sources/contribute.md.txt new file mode 100644 index 0000000000000000000000000000000000000000..42475252b27d25fcafa8debe1e19e897b981e1d8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_sources/contribute.md.txt @@ -0,0 +1,9 @@ +# Contributing + +Thank you for your interest in contributing to the CUTLASS Python interface. Based on the type of contribution, it will fall into two categories: + +1. You want to report a bug, feature request, or documentation issue + - File an [issue](https://github.com/NVIDIA/cutlass/issues/new/choose) describing what you encountered or what you want to see changed. + - The CUTLASS team will evaluate the issues and triage them, scheduling them for a release. If you believe the issue needs priority attention, comment on the issue to notify the team. +2. You want to implement a feature or bug-fix + - We welcome contributions from the community. We recommend that you contribute via a [pull request](https://github.com/NVIDIA/cutlass/pulls). If you have questions about CUTLASS, consider asking a question via the [Discussions](https://github.com/NVIDIA/cutlass/discussions) tab. Please be sure to search through both existing issues and discussions to see whether your question has already been answered. diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_sources/cutlass.emit.rst.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_sources/cutlass.emit.rst.txt new file mode 100644 index 0000000000000000000000000000000000000000..3e65d407766a1e71c8e6553f2e05fed0cfa74311 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_sources/cutlass.emit.rst.txt @@ -0,0 +1,18 @@ +Emitters +======== + +Common +------ + +.. automodule:: cutlass.emit.common + :members: + :undoc-members: + :show-inheritance: + +PyTorch +------- + +.. automodule:: cutlass.emit.pytorch + :members: + :undoc-members: + :show-inheritance: diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_sources/cutlass.op.rst.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_sources/cutlass.op.rst.txt new file mode 100644 index 0000000000000000000000000000000000000000..3b8a2b7e574b21b059d2be93b159e0203fbefb84 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_sources/cutlass.op.rst.txt @@ -0,0 +1,26 @@ +Operations +========== + +GEMM +---- + +.. automodule:: cutlass.op.gemm + :members: + :undoc-members: + :show-inheritance: + +Grouped GEMM +------------ + +.. automodule:: cutlass.op.gemm_grouped + :members: + :undoc-members: + :show-inheritance: + +Operation +--------- + +.. automodule:: cutlass.op.op + :members: + :undoc-members: + :show-inheritance: diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_sources/cutlass.rst.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_sources/cutlass.rst.txt new file mode 100644 index 0000000000000000000000000000000000000000..a65c25182110b056172db593388f85dd7a097dde --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_sources/cutlass.rst.txt @@ -0,0 +1,36 @@ +CUTLASS +======= + +Subpackages +----------- + +.. 
toctree:: + :maxdepth: 1 + + cutlass.emit + cutlass.op + cutlass.utils + +Epilogue +-------- + +.. automodule:: cutlass.epilogue + :members: + :undoc-members: + :show-inheritance: + +Library Defaults +---------------- + +.. automodule:: cutlass.library_defaults + :members: + :undoc-members: + :show-inheritance: + +Swizzle +---------- + +.. automodule:: cutlass.swizzle + :members: + :undoc-members: + :show-inheritance: diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_sources/cutlass.utils.rst.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_sources/cutlass.utils.rst.txt new file mode 100644 index 0000000000000000000000000000000000000000..58e56e566503cc7fcac312d4936d19f40604d873 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_sources/cutlass.utils.rst.txt @@ -0,0 +1,18 @@ +Utilities +========= + +Checks +------ + +.. automodule:: cutlass.utils.check + :members: + :undoc-members: + :show-inheritance: + +Data Types +---------- + +.. automodule:: cutlass.utils.datatypes + :members: + :undoc-members: + :show-inheritance: diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_sources/examples.rst.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_sources/examples.rst.txt new file mode 100644 index 0000000000000000000000000000000000000000..3cea362113558051c3daad1b40c12866b71a2819 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_sources/examples.rst.txt @@ -0,0 +1,9 @@ +Examples +================== + +.. toctree:: + :maxdepth: 5 + + Basic GEMM + Epilogue + PyTorch Extension diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_sources/externals/00_basic_gemm.nblink.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_sources/externals/00_basic_gemm.nblink.txt new file mode 100644 index 0000000000000000000000000000000000000000..b384198567f40e4a0384a592eac91bd3c5e1048a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_sources/externals/00_basic_gemm.nblink.txt @@ -0,0 +1,3 @@ +{ + "path": "./../../../../examples/python/00_basic_gemm.ipynb" +} diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_sources/externals/01_epilogue.nblink.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_sources/externals/01_epilogue.nblink.txt new file mode 100644 index 0000000000000000000000000000000000000000..14503a1ee3d81f7d02247c0132d49768781e684f --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_sources/externals/01_epilogue.nblink.txt @@ -0,0 +1,3 @@ +{ + "path": "./../../../../examples/python/01_epilogue.ipynb" +} diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_sources/externals/02_pytorch_extension_grouped_gemm.nblink.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_sources/externals/02_pytorch_extension_grouped_gemm.nblink.txt new file mode 100644 index 0000000000000000000000000000000000000000..7da19aff8d9043c6d4055c1116e7a9b739beebf1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_sources/externals/02_pytorch_extension_grouped_gemm.nblink.txt @@ -0,0 +1,3 @@ +{ + "path": "./../../../../examples/python/02_pytorch_extension_grouped_gemm.ipynb" +} diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_sources/index.rst.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_sources/index.rst.txt new file mode 100644 index 0000000000000000000000000000000000000000..73cc742d5a0048f6de2add905919bd49c21164d6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_sources/index.rst.txt @@ -0,0 +1,55 @@ +.. CUTLASS Python interface documentation master file, created by + sphinx-quickstart on Mon Feb 13 17:57:39 2023. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +.. include:: ../../README.md + :start-line: 1 + :parser: markdown + +.. toctree:: + :hidden: + + Home + +.. toctree:: + :hidden: + :caption: Getting Started: + + install.md + Getting Started + contribute.md + +.. toctree:: + :hidden: + :caption: Python Documentation: + + modules.rst + +.. toctree:: + :hidden: + :caption: Examples and Tutorials: + + examples.rst + +.. toctree:: + :hidden: + :caption: Advanced: + +.. toctree:: + :hidden: + :caption: FAQ: + +.. toctree:: + :hidden: + :caption: Reference: + + Github + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_sources/install.md.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_sources/install.md.txt new file mode 100644 index 0000000000000000000000000000000000000000..4b5da105178e4e75d31569f13444fce51043a577 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_sources/install.md.txt @@ -0,0 +1,36 @@ +# Installation + +## Installing from source + +Installing from source requires the latest CUDA Toolkit that matches the major.minor of CUDA Python installed. + +Prior to installing the CUTLASS Python interface, one may optionally set the following environment variables: +* `CUTLASS_PATH`: the path to the cloned CUTLASS repository +* `CUDA_INSTALL_PATH`: the path to the installation of CUDA + +If these environment variables are not set, the installation process will infer them to be the following: +* `CUTLASS_PATH`: one directory level above the current directory (i.e., `$(pwd)/..`) +* `CUDA_INSTALL_PATH`: the directory holding `/bin/nvcc` for the first version of `nvcc` on `$PATH` (i.e., `which nvcc | awk -F'/bin/nvcc' '{print $1}'`) + +**NOTE:** The version of `cuda-python` installed must match the CUDA version in `CUDA_INSTALL_PATH`. + +### Installing a developer-mode package +The CUTLASS Python interface can currently be installed via: +```bash +python setup.py develop --user +``` +This will allow changes to the Python interface source to be reflected when using the Python interface. + +We plan to add support for installing via `python setup.py install` in a future release. + +## Docker +To ensure that you have all of the necessary Python modules for running the examples using the +CUTLASS Python interface, we recommend using one of the Docker images located in the docker directory. + +For example, to build and launch a container that uses CUDA 12.1 via an NGC PyTorch container, run: +```bash +docker build -t cutlass-cuda12.1:latest -f docker/Dockerfile-cuda12.1-pytorch . +docker run --gpus all -it --rm cutlass-cuda12.1:latest +``` + +The CUTLASS Python interface has been tested with CUDA 11.8, 12.0, and 12.1 on Python 3.8.10 and 3.9.7. 
diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_sources/modules.rst.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_sources/modules.rst.txt new file mode 100644 index 0000000000000000000000000000000000000000..467824e9e8e1ad200bdb2d7939afaaa56548fccb --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_sources/modules.rst.txt @@ -0,0 +1,7 @@ +CUTLASS Python API +================== + +.. toctree:: + :maxdepth: 5 + + cutlass diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/basic.css b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/basic.css new file mode 100644 index 0000000000000000000000000000000000000000..7577acb1ad176e3f58c15ddf9bf1f73525dfe7ed --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/basic.css @@ -0,0 +1,903 @@ +/* + * basic.css + * ~~~~~~~~~ + * + * Sphinx stylesheet -- basic theme. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +div.section::after { + display: block; + content: ''; + clear: left; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 230px; + margin-left: -100%; + font-size: 90%; + word-wrap: break-word; + overflow-wrap : break-word; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar #searchbox form.search { + overflow: hidden; +} + +div.sphinxsidebar #searchbox input[type="text"] { + float: left; + width: 80%; + padding: 0.25em; + box-sizing: border-box; +} + +div.sphinxsidebar #searchbox input[type="submit"] { + float: left; + width: 20%; + border-left: none; + padding: 0.25em; + box-sizing: border-box; +} + + +img { + border: 0; + max-width: 100%; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin: 10px 0 0 20px; + padding: 0; +} + +ul.search li { + padding: 5px 0 5px 20px; + background-image: url(file.png); + background-repeat: no-repeat; + background-position: 0 7px; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li p.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; + margin-left: auto; + margin-right: auto; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + 
+span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable { + width: 100%; +} + +table.indextable td { + text-align: left; + vertical-align: top; +} + +table.indextable ul { + margin-top: 0; + margin-bottom: 0; + list-style-type: none; +} + +table.indextable > tbody > tr > td > ul { + padding-left: 0em; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +div.modindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +div.genindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +/* -- domain module index --------------------------------------------------- */ + +table.modindextable td { + padding: 2px; + border-collapse: collapse; +} + +/* -- general body styles --------------------------------------------------- */ + +div.body { + min-width: 360px; + max-width: 800px; +} + +div.body p, div.body dd, div.body li, div.body blockquote { + -moz-hyphens: auto; + -ms-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; +} + +a.headerlink { + visibility: hidden; +} + +h1:hover > a.headerlink, +h2:hover > a.headerlink, +h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > a.headerlink, +dt:hover > a.headerlink, +caption:hover > a.headerlink, +p.caption:hover > a.headerlink, +div.code-block-caption:hover > a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.first { + margin-top: 0 !important; +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +img.align-left, figure.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; +} + +img.align-right, figure.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; +} + +img.align-center, figure.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +img.align-default, figure.align-default, .figure.align-default { + display: block; + margin-left: auto; + margin-right: auto; +} + +.align-left { + text-align: left; +} + +.align-center { + text-align: center; +} + +.align-default { + text-align: center; +} + +.align-right { + text-align: right; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar, +aside.sidebar { + margin: 0 0 0.5em 1em; + border: 1px solid #ddb; + padding: 7px; + background-color: #ffe; + width: 40%; + float: right; + clear: right; + overflow-x: auto; +} + +p.sidebar-title { + font-weight: bold; +} + +nav.contents, +aside.topic, +div.admonition, div.topic, blockquote { + clear: left; +} + +/* -- topics ---------------------------------------------------------------- */ + +nav.contents, +aside.topic, +div.topic { + border: 1px solid #ccc; + padding: 7px; + margin: 10px 0 10px 0; +} + +p.topic-title { + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt { + font-weight: bold; +} + 
+p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- content of sidebars/topics/admonitions -------------------------------- */ + +div.sidebar > :last-child, +aside.sidebar > :last-child, +nav.contents > :last-child, +aside.topic > :last-child, +div.topic > :last-child, +div.admonition > :last-child { + margin-bottom: 0; +} + +div.sidebar::after, +aside.sidebar::after, +nav.contents::after, +aside.topic::after, +div.topic::after, +div.admonition::after, +blockquote::after { + display: block; + content: ''; + clear: both; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + margin-top: 10px; + margin-bottom: 10px; + border: 0; + border-collapse: collapse; +} + +table.align-center { + margin-left: auto; + margin-right: auto; +} + +table.align-default { + margin-left: auto; + margin-right: auto; +} + +table caption span.caption-number { + font-style: italic; +} + +table caption span.caption-text { +} + +table.docutils td, table.docutils th { + padding: 1px 8px 1px 5px; + border-top: 0; + border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +th { + text-align: left; + padding-right: 5px; +} + +table.citation { + border-left: solid 1px gray; + margin-left: 1px; +} + +table.citation td { + border-bottom: none; +} + +th > :first-child, +td > :first-child { + margin-top: 0px; +} + +th > :last-child, +td > :last-child { + margin-bottom: 0px; +} + +/* -- figures --------------------------------------------------------------- */ + +div.figure, figure { + margin: 0.5em; + padding: 0.5em; +} + +div.figure p.caption, figcaption { + padding: 0.3em; +} + +div.figure p.caption span.caption-number, +figcaption span.caption-number { + font-style: italic; +} + +div.figure p.caption span.caption-text, +figcaption span.caption-text { +} + +/* -- field list styles ----------------------------------------------------- */ + +table.field-list td, table.field-list th { + border: 0 !important; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +/* -- hlist styles ---------------------------------------------------------- */ + +table.hlist { + margin: 1em 0; +} + +table.hlist td { + vertical-align: top; +} + +/* -- object description styles --------------------------------------------- */ + +.sig { + font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace; +} + +.sig-name, code.descname { + background-color: transparent; + font-weight: bold; +} + +.sig-name { + font-size: 1.1em; +} + +code.descname { + font-size: 1.2em; +} + +.sig-prename, code.descclassname { + background-color: transparent; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.sig-param.n { + font-style: italic; +} + +/* C++ specific styling */ + +.sig-inline.c-texpr, +.sig-inline.cpp-texpr { + font-family: unset; +} + +.sig.c .k, .sig.c .kt, +.sig.cpp .k, .sig.cpp .kt { + color: #0033B3; +} + +.sig.c .m, +.sig.cpp .m { + color: #1750EB; +} + +.sig.c .s, .sig.c .sc, +.sig.cpp .s, .sig.cpp .sc { + color: #067D17; +} + + +/* -- other body styles ----------------------------------------------------- */ + +ol.arabic { + list-style: decimal; +} + +ol.loweralpha { + list-style: lower-alpha; +} + +ol.upperalpha { + list-style: upper-alpha; +} + +ol.lowerroman { 
+ list-style: lower-roman; +} + +ol.upperroman { + list-style: upper-roman; +} + +:not(li) > ol > li:first-child > :first-child, +:not(li) > ul > li:first-child > :first-child { + margin-top: 0px; +} + +:not(li) > ol > li:last-child > :last-child, +:not(li) > ul > li:last-child > :last-child { + margin-bottom: 0px; +} + +ol.simple ol p, +ol.simple ul p, +ul.simple ol p, +ul.simple ul p { + margin-top: 0; +} + +ol.simple > li:not(:first-child) > p, +ul.simple > li:not(:first-child) > p { + margin-top: 0; +} + +ol.simple p, +ul.simple p { + margin-bottom: 0; +} + +aside.footnote > span, +div.citation > span { + float: left; +} +aside.footnote > span:last-of-type, +div.citation > span:last-of-type { + padding-right: 0.5em; +} +aside.footnote > p { + margin-left: 2em; +} +div.citation > p { + margin-left: 4em; +} +aside.footnote > p:last-of-type, +div.citation > p:last-of-type { + margin-bottom: 0em; +} +aside.footnote > p:last-of-type:after, +div.citation > p:last-of-type:after { + content: ""; + clear: both; +} + +dl.field-list { + display: grid; + grid-template-columns: fit-content(30%) auto; +} + +dl.field-list > dt { + font-weight: bold; + word-break: break-word; + padding-left: 0.5em; + padding-right: 5px; +} + +dl.field-list > dd { + padding-left: 0.5em; + margin-top: 0em; + margin-left: 0em; + margin-bottom: 0em; +} + +dl { + margin-bottom: 15px; +} + +dd > :first-child { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +dl > dd:last-child, +dl > dd:last-child > :last-child { + margin-bottom: 0; +} + +dt:target, span.highlighted { + background-color: #fbe54e; +} + +rect.highlighted { + fill: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; + font-size: 1.1em; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa; +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block .line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +.guilabel, .menuselection { + font-family: sans-serif; +} + +.accelerator { + text-decoration: underline; +} + +.classifier { + font-style: oblique; +} + +.classifier:before { + font-style: normal; + margin: 0 0.5em; + content: ":"; + display: inline-block; +} + +abbr, acronym { + border-bottom: dotted 1px; + cursor: help; +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; + overflow-y: hidden; /* fixes display issues on Chrome browsers */ +} + +pre, div[class*="highlight-"] { + clear: both; +} + +span.pre { + -moz-hyphens: none; + -ms-hyphens: none; + -webkit-hyphens: none; + hyphens: none; + white-space: nowrap; +} + +div[class*="highlight-"] { + margin: 1em 0; +} + +td.linenos pre { + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + display: block; +} + +table.highlighttable tbody { + display: block; +} + +table.highlighttable tr { + display: flex; +} + +table.highlighttable td { + margin: 0; + padding: 0; +} + +table.highlighttable td.linenos { + padding-right: 0.5em; +} + +table.highlighttable td.code { + flex: 1; + overflow: hidden; +} + +.highlight .hll { + display: block; +} + +div.highlight pre, +table.highlighttable pre { + margin: 0; +} + +div.code-block-caption + div { + margin-top: 0; +} + +div.code-block-caption { + margin-top: 1em; + padding: 2px 5px; + 
font-size: small; +} + +div.code-block-caption code { + background-color: transparent; +} + +table.highlighttable td.linenos, +span.linenos, +div.highlight span.gp { /* gp: Generic.Prompt */ + user-select: none; + -webkit-user-select: text; /* Safari fallback only */ + -webkit-user-select: none; /* Chrome/Safari */ + -moz-user-select: none; /* Firefox */ + -ms-user-select: none; /* IE10+ */ +} + +div.code-block-caption span.caption-number { + padding: 0.1em 0.3em; + font-style: italic; +} + +div.code-block-caption span.caption-text { +} + +div.literal-block-wrapper { + margin: 1em 0; +} + +code.xref, a code { + background-color: transparent; + font-weight: bold; +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + background-color: transparent; +} + +.viewcode-link { + float: right; +} + +.viewcode-back { + float: right; + font-family: sans-serif; +} + +div.viewcode-block:target { + margin: -1px -10px; + padding: 0 10px; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +span.eqno a.headerlink { + position: absolute; + z-index: 1; +} + +div.math:hover a.headerlink { + visibility: visible; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } +} \ No newline at end of file diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/check-solid.svg b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/check-solid.svg new file mode 100644 index 0000000000000000000000000000000000000000..92fad4b5c0b766702548d26ab2be2832e19ad4da --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/check-solid.svg @@ -0,0 +1,4 @@ + + + + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/clipboard.min.js b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/clipboard.min.js new file mode 100644 index 0000000000000000000000000000000000000000..54b3c4638111e1d6433fa1a88b87180667db9cf0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/clipboard.min.js @@ -0,0 +1,7 @@ +/*! 
+ * clipboard.js v2.0.8 + * https://clipboardjs.com/ + * + * Licensed MIT © Zeno Rocha + */ +!function(t,e){"object"==typeof exports&&"object"==typeof module?module.exports=e():"function"==typeof define&&define.amd?define([],e):"object"==typeof exports?exports.ClipboardJS=e():t.ClipboardJS=e()}(this,function(){return n={686:function(t,e,n){"use strict";n.d(e,{default:function(){return o}});var e=n(279),i=n.n(e),e=n(370),u=n.n(e),e=n(817),c=n.n(e);function a(t){try{return document.execCommand(t)}catch(t){return}}var f=function(t){t=c()(t);return a("cut"),t};var l=function(t){var e,n,o,r=1 + + + + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/copybutton.css b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/copybutton.css new file mode 100644 index 0000000000000000000000000000000000000000..f1916ec7d1bb993b7909cd57cef0446d922982ff --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/copybutton.css @@ -0,0 +1,94 @@ +/* Copy buttons */ +button.copybtn { + position: absolute; + display: flex; + top: .3em; + right: .3em; + width: 1.7em; + height: 1.7em; + opacity: 0; + transition: opacity 0.3s, border .3s, background-color .3s; + user-select: none; + padding: 0; + border: none; + outline: none; + border-radius: 0.4em; + /* The colors that GitHub uses */ + border: #1b1f2426 1px solid; + background-color: #f6f8fa; + color: #57606a; +} + +button.copybtn.success { + border-color: #22863a; + color: #22863a; +} + +button.copybtn svg { + stroke: currentColor; + width: 1.5em; + height: 1.5em; + padding: 0.1em; +} + +div.highlight { + position: relative; +} + +/* Show the copybutton */ +.highlight:hover button.copybtn, button.copybtn.success { + opacity: 1; +} + +.highlight button.copybtn:hover { + background-color: rgb(235, 235, 235); +} + +.highlight button.copybtn:active { + background-color: rgb(187, 187, 187); +} + +/** + * A minimal CSS-only tooltip copied from: + * https://codepen.io/mildrenben/pen/rVBrpK + * + * To use, write HTML like the following: + * + *

Short

+ */ + .o-tooltip--left { + position: relative; + } + + .o-tooltip--left:after { + opacity: 0; + visibility: hidden; + position: absolute; + content: attr(data-tooltip); + padding: .2em; + font-size: .8em; + left: -.2em; + background: grey; + color: white; + white-space: nowrap; + z-index: 2; + border-radius: 2px; + transform: translateX(-102%) translateY(0); + transition: opacity 0.2s cubic-bezier(0.64, 0.09, 0.08, 1), transform 0.2s cubic-bezier(0.64, 0.09, 0.08, 1); +} + +.o-tooltip--left:hover:after { + display: block; + opacity: 1; + visibility: visible; + transform: translateX(-100%) translateY(0); + transition: opacity 0.2s cubic-bezier(0.64, 0.09, 0.08, 1), transform 0.2s cubic-bezier(0.64, 0.09, 0.08, 1); + transition-delay: .5s; +} + +/* By default the copy button shouldn't show up when printing a page */ +@media print { + button.copybtn { + display: none; + } +} diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/copybutton.js b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/copybutton.js new file mode 100644 index 0000000000000000000000000000000000000000..2ea7ff3e217ba6a8710731c335e515535bd76415 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/copybutton.js @@ -0,0 +1,248 @@ +// Localization support +const messages = { + 'en': { + 'copy': 'Copy', + 'copy_to_clipboard': 'Copy to clipboard', + 'copy_success': 'Copied!', + 'copy_failure': 'Failed to copy', + }, + 'es' : { + 'copy': 'Copiar', + 'copy_to_clipboard': 'Copiar al portapapeles', + 'copy_success': '¡Copiado!', + 'copy_failure': 'Error al copiar', + }, + 'de' : { + 'copy': 'Kopieren', + 'copy_to_clipboard': 'In die Zwischenablage kopieren', + 'copy_success': 'Kopiert!', + 'copy_failure': 'Fehler beim Kopieren', + }, + 'fr' : { + 'copy': 'Copier', + 'copy_to_clipboard': 'Copier dans le presse-papier', + 'copy_success': 'Copié !', + 'copy_failure': 'Échec de la copie', + }, + 'ru': { + 'copy': 'Скопировать', + 'copy_to_clipboard': 'Скопировать в буфер', + 'copy_success': 'Скопировано!', + 'copy_failure': 'Не удалось скопировать', + }, + 'zh-CN': { + 'copy': '复制', + 'copy_to_clipboard': '复制到剪贴板', + 'copy_success': '复制成功!', + 'copy_failure': '复制失败', + }, + 'it' : { + 'copy': 'Copiare', + 'copy_to_clipboard': 'Copiato negli appunti', + 'copy_success': 'Copiato!', + 'copy_failure': 'Errore durante la copia', + } +} + +let locale = 'en' +if( document.documentElement.lang !== undefined + && messages[document.documentElement.lang] !== undefined ) { + locale = document.documentElement.lang +} + +let doc_url_root = DOCUMENTATION_OPTIONS.URL_ROOT; +if (doc_url_root == '#') { + doc_url_root = ''; +} + +/** + * SVG files for our copy buttons + */ +let iconCheck = ` + ${messages[locale]['copy_success']} + + +` + +// If the user specified their own SVG use that, otherwise use the default +let iconCopy = ``; +if (!iconCopy) { + iconCopy = ` + ${messages[locale]['copy_to_clipboard']} + + + +` +} + +/** + * Set up copy/paste for code blocks + */ + +const runWhenDOMLoaded = cb => { + if (document.readyState != 'loading') { + cb() + } else if (document.addEventListener) { + document.addEventListener('DOMContentLoaded', cb) + } else { + document.attachEvent('onreadystatechange', function() { + if (document.readyState == 'complete') cb() + }) + } +} + +const codeCellId = index => `codecell${index}` + +// Clears selected text since ClipboardJS will select the text when copying +const clearSelection = () => { + if 
(window.getSelection) { + window.getSelection().removeAllRanges() + } else if (document.selection) { + document.selection.empty() + } +} + +// Changes tooltip text for a moment, then changes it back +// We want the timeout of our `success` class to be a bit shorter than the +// tooltip and icon change, so that we can hide the icon before changing back. +var timeoutIcon = 2000; +var timeoutSuccessClass = 1500; + +const temporarilyChangeTooltip = (el, oldText, newText) => { + el.setAttribute('data-tooltip', newText) + el.classList.add('success') + // Remove success a little bit sooner than we change the tooltip + // So that we can use CSS to hide the copybutton first + setTimeout(() => el.classList.remove('success'), timeoutSuccessClass) + setTimeout(() => el.setAttribute('data-tooltip', oldText), timeoutIcon) +} + +// Changes the copy button icon for two seconds, then changes it back +const temporarilyChangeIcon = (el) => { + el.innerHTML = iconCheck; + setTimeout(() => {el.innerHTML = iconCopy}, timeoutIcon) +} + +const addCopyButtonToCodeCells = () => { + // If ClipboardJS hasn't loaded, wait a bit and try again. This + // happens because we load ClipboardJS asynchronously. + if (window.ClipboardJS === undefined) { + setTimeout(addCopyButtonToCodeCells, 250) + return + } + + // Add copybuttons to all of our code cells + const COPYBUTTON_SELECTOR = 'div.highlight pre'; + const codeCells = document.querySelectorAll(COPYBUTTON_SELECTOR) + codeCells.forEach((codeCell, index) => { + const id = codeCellId(index) + codeCell.setAttribute('id', id) + + const clipboardButton = id => + `` + codeCell.insertAdjacentHTML('afterend', clipboardButton(id)) + }) + +function escapeRegExp(string) { + return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); // $& means the whole matched string +} + +/** + * Removes excluded text from a Node. + * + * @param {Node} target Node to filter. + * @param {string} exclude CSS selector of nodes to exclude. + * @returns {DOMString} Text from `target` with text removed. + */ +function filterText(target, exclude) { + const clone = target.cloneNode(true); // clone as to not modify the live DOM + if (exclude) { + // remove excluded nodes + clone.querySelectorAll(exclude).forEach(node => node.remove()); + } + return clone.innerText; +} + +// Callback when a copy button is clicked. Will be passed the node that was clicked +// should then grab the text and replace pieces of text that shouldn't be used in output +function formatCopyText(textContent, copybuttonPromptText, isRegexp = false, onlyCopyPromptLines = true, removePrompts = true, copyEmptyLines = true, lineContinuationChar = "", hereDocDelim = "") { + var regexp; + var match; + + // Do we check for line continuation characters and "HERE-documents"? 
+ var useLineCont = !!lineContinuationChar + var useHereDoc = !!hereDocDelim + + // create regexp to capture prompt and remaining line + if (isRegexp) { + regexp = new RegExp('^(' + copybuttonPromptText + ')(.*)') + } else { + regexp = new RegExp('^(' + escapeRegExp(copybuttonPromptText) + ')(.*)') + } + + const outputLines = []; + var promptFound = false; + var gotLineCont = false; + var gotHereDoc = false; + const lineGotPrompt = []; + for (const line of textContent.split('\n')) { + match = line.match(regexp) + if (match || gotLineCont || gotHereDoc) { + promptFound = regexp.test(line) + lineGotPrompt.push(promptFound) + if (removePrompts && promptFound) { + outputLines.push(match[2]) + } else { + outputLines.push(line) + } + gotLineCont = line.endsWith(lineContinuationChar) & useLineCont + if (line.includes(hereDocDelim) & useHereDoc) + gotHereDoc = !gotHereDoc + } else if (!onlyCopyPromptLines) { + outputLines.push(line) + } else if (copyEmptyLines && line.trim() === '') { + outputLines.push(line) + } + } + + // If no lines with the prompt were found then just use original lines + if (lineGotPrompt.some(v => v === true)) { + textContent = outputLines.join('\n'); + } + + // Remove a trailing newline to avoid auto-running when pasting + if (textContent.endsWith("\n")) { + textContent = textContent.slice(0, -1) + } + return textContent +} + + +var copyTargetText = (trigger) => { + var target = document.querySelector(trigger.attributes['data-clipboard-target'].value); + + // get filtered text + let exclude = '.linenos'; + + let text = filterText(target, exclude); + return formatCopyText(text, '', false, true, true, true, '', '') +} + + // Initialize with a callback so we can modify the text before copy + const clipboard = new ClipboardJS('.copybtn', {text: copyTargetText}) + + // Update UI with error/success messages + clipboard.on('success', event => { + clearSelection() + temporarilyChangeTooltip(event.trigger, messages[locale]['copy'], messages[locale]['copy_success']) + temporarilyChangeIcon(event.trigger) + }) + + clipboard.on('error', event => { + temporarilyChangeTooltip(event.trigger, messages[locale]['copy'], messages[locale]['copy_failure']) + }) +} + +runWhenDOMLoaded(addCopyButtonToCodeCells) \ No newline at end of file diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/copybutton_funcs.js b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/copybutton_funcs.js new file mode 100644 index 0000000000000000000000000000000000000000..dbe1aaad79cd1f835c8fbb304d255e19a3b39512 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/copybutton_funcs.js @@ -0,0 +1,73 @@ +function escapeRegExp(string) { + return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); // $& means the whole matched string +} + +/** + * Removes excluded text from a Node. + * + * @param {Node} target Node to filter. + * @param {string} exclude CSS selector of nodes to exclude. + * @returns {DOMString} Text from `target` with text removed. + */ +export function filterText(target, exclude) { + const clone = target.cloneNode(true); // clone as to not modify the live DOM + if (exclude) { + // remove excluded nodes + clone.querySelectorAll(exclude).forEach(node => node.remove()); + } + return clone.innerText; +} + +// Callback when a copy button is clicked. 
Will be passed the node that was clicked +// should then grab the text and replace pieces of text that shouldn't be used in output +export function formatCopyText(textContent, copybuttonPromptText, isRegexp = false, onlyCopyPromptLines = true, removePrompts = true, copyEmptyLines = true, lineContinuationChar = "", hereDocDelim = "") { + var regexp; + var match; + + // Do we check for line continuation characters and "HERE-documents"? + var useLineCont = !!lineContinuationChar + var useHereDoc = !!hereDocDelim + + // create regexp to capture prompt and remaining line + if (isRegexp) { + regexp = new RegExp('^(' + copybuttonPromptText + ')(.*)') + } else { + regexp = new RegExp('^(' + escapeRegExp(copybuttonPromptText) + ')(.*)') + } + + const outputLines = []; + var promptFound = false; + var gotLineCont = false; + var gotHereDoc = false; + const lineGotPrompt = []; + for (const line of textContent.split('\n')) { + match = line.match(regexp) + if (match || gotLineCont || gotHereDoc) { + promptFound = regexp.test(line) + lineGotPrompt.push(promptFound) + if (removePrompts && promptFound) { + outputLines.push(match[2]) + } else { + outputLines.push(line) + } + gotLineCont = line.endsWith(lineContinuationChar) & useLineCont + if (line.includes(hereDocDelim) & useHereDoc) + gotHereDoc = !gotHereDoc + } else if (!onlyCopyPromptLines) { + outputLines.push(line) + } else if (copyEmptyLines && line.trim() === '') { + outputLines.push(line) + } + } + + // If no lines with the prompt were found then just use original lines + if (lineGotPrompt.some(v => v === true)) { + textContent = outputLines.join('\n'); + } + + // Remove a trailing newline to avoid auto-running when pasting + if (textContent.endsWith("\n")) { + textContent = textContent.slice(0, -1) + } + return textContent +} diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/debug.css b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/debug.css new file mode 100644 index 0000000000000000000000000000000000000000..74d4aec33e5ccd45b77c871a005d96f9f8ae1dc4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/debug.css @@ -0,0 +1,69 @@ +/* + This CSS file should be overridden by the theme authors. It's + meant for debugging and developing the skeleton that this theme provides. 
+*/ +body { + font-family: -apple-system, "Segoe UI", Roboto, Helvetica, Arial, sans-serif, + "Apple Color Emoji", "Segoe UI Emoji"; + background: lavender; +} +.sb-announcement { + background: rgb(131, 131, 131); +} +.sb-announcement__inner { + background: black; + color: white; +} +.sb-header { + background: lightskyblue; +} +.sb-header__inner { + background: royalblue; + color: white; +} +.sb-header-secondary { + background: lightcyan; +} +.sb-header-secondary__inner { + background: cornflowerblue; + color: white; +} +.sb-sidebar-primary { + background: lightgreen; +} +.sb-main { + background: blanchedalmond; +} +.sb-main__inner { + background: antiquewhite; +} +.sb-header-article { + background: lightsteelblue; +} +.sb-article-container { + background: snow; +} +.sb-article-main { + background: white; +} +.sb-footer-article { + background: lightpink; +} +.sb-sidebar-secondary { + background: lightgoldenrodyellow; +} +.sb-footer-content { + background: plum; +} +.sb-footer-content__inner { + background: palevioletred; +} +.sb-footer { + background: pink; +} +.sb-footer__inner { + background: salmon; +} +.sb-article { + background: white; +} diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/doctools.js b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/doctools.js new file mode 100644 index 0000000000000000000000000000000000000000..d06a71d7518041301a303697d2a3c372648eb7bf --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/doctools.js @@ -0,0 +1,156 @@ +/* + * doctools.js + * ~~~~~~~~~~~ + * + * Base JavaScript utilities for all Sphinx HTML documentation. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ +"use strict"; + +const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([ + "TEXTAREA", + "INPUT", + "SELECT", + "BUTTON", +]); + +const _ready = (callback) => { + if (document.readyState !== "loading") { + callback(); + } else { + document.addEventListener("DOMContentLoaded", callback); + } +}; + +/** + * Small JavaScript module for the documentation. + */ +const Documentation = { + init: () => { + Documentation.initDomainIndexTable(); + Documentation.initOnKeyListeners(); + }, + + /** + * i18n support + */ + TRANSLATIONS: {}, + PLURAL_EXPR: (n) => (n === 1 ? 0 : 1), + LOCALE: "unknown", + + // gettext and ngettext don't access this so that the functions + // can safely bound to a different name (_ = Documentation.gettext) + gettext: (string) => { + const translated = Documentation.TRANSLATIONS[string]; + switch (typeof translated) { + case "undefined": + return string; // no translation + case "string": + return translated; // translation exists + default: + return translated[0]; // (singular, plural) translation tuple exists + } + }, + + ngettext: (singular, plural, n) => { + const translated = Documentation.TRANSLATIONS[singular]; + if (typeof translated !== "undefined") + return translated[Documentation.PLURAL_EXPR(n)]; + return n === 1 ? 
singular : plural; + }, + + addTranslations: (catalog) => { + Object.assign(Documentation.TRANSLATIONS, catalog.messages); + Documentation.PLURAL_EXPR = new Function( + "n", + `return (${catalog.plural_expr})` + ); + Documentation.LOCALE = catalog.locale; + }, + + /** + * helper function to focus on search bar + */ + focusSearchBar: () => { + document.querySelectorAll("input[name=q]")[0]?.focus(); + }, + + /** + * Initialise the domain index toggle buttons + */ + initDomainIndexTable: () => { + const toggler = (el) => { + const idNumber = el.id.substr(7); + const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`); + if (el.src.substr(-9) === "minus.png") { + el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`; + toggledRows.forEach((el) => (el.style.display = "none")); + } else { + el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`; + toggledRows.forEach((el) => (el.style.display = "")); + } + }; + + const togglerElements = document.querySelectorAll("img.toggler"); + togglerElements.forEach((el) => + el.addEventListener("click", (event) => toggler(event.currentTarget)) + ); + togglerElements.forEach((el) => (el.style.display = "")); + if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler); + }, + + initOnKeyListeners: () => { + // only install a listener if it is really needed + if ( + !DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS && + !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS + ) + return; + + document.addEventListener("keydown", (event) => { + // bail for input elements + if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; + // bail with special keys + if (event.altKey || event.ctrlKey || event.metaKey) return; + + if (!event.shiftKey) { + switch (event.key) { + case "ArrowLeft": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const prevLink = document.querySelector('link[rel="prev"]'); + if (prevLink && prevLink.href) { + window.location.href = prevLink.href; + event.preventDefault(); + } + break; + case "ArrowRight": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const nextLink = document.querySelector('link[rel="next"]'); + if (nextLink && nextLink.href) { + window.location.href = nextLink.href; + event.preventDefault(); + } + break; + } + } + + // some keyboard layouts may need Shift to get / + switch (event.key) { + case "/": + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break; + Documentation.focusSearchBar(); + event.preventDefault(); + } + }); + }, +}; + +// quick alias for translations +const _ = Documentation.gettext; + +_ready(Documentation.init); diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/documentation_options.js b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/documentation_options.js new file mode 100644 index 0000000000000000000000000000000000000000..6ba7fc6804f28ef019381399b107052bc7e1420f --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/documentation_options.js @@ -0,0 +1,14 @@ +var DOCUMENTATION_OPTIONS = { + URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), + VERSION: '3.1.0', + LANGUAGE: 'en', + COLLAPSE_INDEX: false, + BUILDER: 'html', + FILE_SUFFIX: '.html', + LINK_SUFFIX: '.html', + HAS_SOURCE: true, + SOURCELINK_SUFFIX: '.txt', + NAVIGATION_WITH_KEYS: false, + SHOW_SEARCH_SUMMARY: true, + ENABLE_SEARCH_SHORTCUTS: true, +}; \ No newline at end of file diff --git 
a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/language_data.js b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/language_data.js new file mode 100644 index 0000000000000000000000000000000000000000..250f5665fa64b70c822190199b3b804b10f8b9d8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/language_data.js @@ -0,0 +1,199 @@ +/* + * language_data.js + * ~~~~~~~~~~~~~~~~ + * + * This script contains the language-specific data used by searchtools.js, + * namely the list of stopwords, stemmer, scorer and splitter. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +var stopwords = ["a", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "near", "no", "not", "of", "on", "or", "such", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"]; + + +/* Non-minified version is copied as a separate JS file, is available */ + +/** + * Porter Stemmer + */ +var Stemmer = function() { + + var step2list = { + ational: 'ate', + tional: 'tion', + enci: 'ence', + anci: 'ance', + izer: 'ize', + bli: 'ble', + alli: 'al', + entli: 'ent', + eli: 'e', + ousli: 'ous', + ization: 'ize', + ation: 'ate', + ator: 'ate', + alism: 'al', + iveness: 'ive', + fulness: 'ful', + ousness: 'ous', + aliti: 'al', + iviti: 'ive', + biliti: 'ble', + logi: 'log' + }; + + var step3list = { + icate: 'ic', + ative: '', + alize: 'al', + iciti: 'ic', + ical: 'ic', + ful: '', + ness: '' + }; + + var c = "[^aeiou]"; // consonant + var v = "[aeiouy]"; // vowel + var C = c + "[^aeiouy]*"; // consonant sequence + var V = v + "[aeiou]*"; // vowel sequence + + var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0 + var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1 + var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1 + var s_v = "^(" + C + ")?" 
+ v; // vowel in stem + + this.stemWord = function (w) { + var stem; + var suffix; + var firstch; + var origword = w; + + if (w.length < 3) + return w; + + var re; + var re2; + var re3; + var re4; + + firstch = w.substr(0,1); + if (firstch == "y") + w = firstch.toUpperCase() + w.substr(1); + + // Step 1a + re = /^(.+?)(ss|i)es$/; + re2 = /^(.+?)([^s])s$/; + + if (re.test(w)) + w = w.replace(re,"$1$2"); + else if (re2.test(w)) + w = w.replace(re2,"$1$2"); + + // Step 1b + re = /^(.+?)eed$/; + re2 = /^(.+?)(ed|ing)$/; + if (re.test(w)) { + var fp = re.exec(w); + re = new RegExp(mgr0); + if (re.test(fp[1])) { + re = /.$/; + w = w.replace(re,""); + } + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1]; + re2 = new RegExp(s_v); + if (re2.test(stem)) { + w = stem; + re2 = /(at|bl|iz)$/; + re3 = new RegExp("([^aeiouylsz])\\1$"); + re4 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re2.test(w)) + w = w + "e"; + else if (re3.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + else if (re4.test(w)) + w = w + "e"; + } + } + + // Step 1c + re = /^(.+?)y$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(s_v); + if (re.test(stem)) + w = stem + "i"; + } + + // Step 2 + re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) + w = stem + step2list[suffix]; + } + + // Step 3 + re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) + w = stem + step3list[suffix]; + } + + // Step 4 + re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/; + re2 = /^(.+?)(s|t)(ion)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + if (re.test(stem)) + w = stem; + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1] + fp[2]; + re2 = new RegExp(mgr1); + if (re2.test(stem)) + w = stem; + } + + // Step 5 + re = /^(.+?)e$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + re2 = new RegExp(meq1); + re3 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) + w = stem; + } + re = /ll$/; + re2 = new RegExp(mgr1); + if (re.test(w) && re2.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + + // and turn initial Y back to y + if (firstch == "y") + w = firstch.toLowerCase() + w.substr(1); + return w; + } +} + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/nbsphinx-broken-thumbnail.svg b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/nbsphinx-broken-thumbnail.svg new file mode 100644 index 0000000000000000000000000000000000000000..4919ca88291cffe000b03720fab446a2f94423c1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/nbsphinx-broken-thumbnail.svg @@ -0,0 +1,9 @@ + + + + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/nbsphinx-code-cells.css b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/nbsphinx-code-cells.css new file mode 100644 index 0000000000000000000000000000000000000000..199fa5a42f8a07bb2e9e31e1b71e2643cb924c7e --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/nbsphinx-code-cells.css @@ -0,0 +1,253 @@ +/* remove conflicting styling from Sphinx themes */ +div.nbinput.container div.prompt *, +div.nboutput.container div.prompt *, +div.nbinput.container div.input_area pre, +div.nboutput.container div.output_area pre, +div.nbinput.container div.input_area .highlight, +div.nboutput.container div.output_area .highlight { + border: none; + padding: 0; + margin: 0; + box-shadow: none; +} + +div.nbinput.container > div[class*=highlight], +div.nboutput.container > div[class*=highlight] { + margin: 0; +} + +div.nbinput.container div.prompt *, +div.nboutput.container div.prompt * { + background: none; +} + +div.nboutput.container div.output_area .highlight, +div.nboutput.container div.output_area pre { + background: unset; +} + +div.nboutput.container div.output_area div.highlight { + color: unset; /* override Pygments text color */ +} + +/* avoid gaps between output lines */ +div.nboutput.container div[class*=highlight] pre { + line-height: normal; +} + +/* input/output containers */ +div.nbinput.container, +div.nboutput.container { + display: -webkit-flex; + display: flex; + align-items: flex-start; + margin: 0; + width: 100%; +} +@media (max-width: 540px) { + div.nbinput.container, + div.nboutput.container { + flex-direction: column; + } +} + +/* input container */ +div.nbinput.container { + padding-top: 5px; +} + +/* last container */ +div.nblast.container { + padding-bottom: 5px; +} + +/* input prompt */ +div.nbinput.container div.prompt pre { + color: #307FC1; +} + +/* output prompt */ +div.nboutput.container div.prompt pre { + color: #BF5B3D; +} + +/* all prompts */ +div.nbinput.container div.prompt, +div.nboutput.container div.prompt { + width: 4.5ex; + padding-top: 5px; + position: relative; + user-select: none; +} + +div.nbinput.container div.prompt > div, +div.nboutput.container div.prompt > div { + position: absolute; + right: 0; + margin-right: 0.3ex; +} + +@media (max-width: 540px) { + div.nbinput.container div.prompt, + div.nboutput.container div.prompt { + width: unset; + text-align: left; + padding: 0.4em; + } + div.nboutput.container div.prompt.empty { + padding: 0; + } + + div.nbinput.container div.prompt > div, + div.nboutput.container div.prompt > div { + position: unset; + } +} + +/* disable scrollbars and line breaks on prompts */ +div.nbinput.container div.prompt pre, +div.nboutput.container div.prompt pre { + overflow: hidden; + white-space: pre; +} + +/* input/output area */ +div.nbinput.container div.input_area, +div.nboutput.container div.output_area { + -webkit-flex: 1; + flex: 1; + overflow: auto; +} +@media (max-width: 540px) { + div.nbinput.container div.input_area, + div.nboutput.container div.output_area { + width: 100%; + } +} + +/* input area */ +div.nbinput.container div.input_area { + border: 1px solid #e0e0e0; + border-radius: 2px; + /*background: #f5f5f5;*/ +} + +/* override MathJax center alignment in output cells */ +div.nboutput.container div[class*=MathJax] { + text-align: left !important; +} + +/* override sphinx.ext.imgmath center alignment in output cells */ +div.nboutput.container div.math p { + text-align: left; +} + +/* standard error */ +div.nboutput.container div.output_area.stderr { + background: #fdd; +} + +/* ANSI colors */ +.ansi-black-fg { color: #3E424D; } +.ansi-black-bg { background-color: #3E424D; } +.ansi-black-intense-fg { color: #282C36; } +.ansi-black-intense-bg { background-color: #282C36; } +.ansi-red-fg { color: 
#E75C58; } +.ansi-red-bg { background-color: #E75C58; } +.ansi-red-intense-fg { color: #B22B31; } +.ansi-red-intense-bg { background-color: #B22B31; } +.ansi-green-fg { color: #00A250; } +.ansi-green-bg { background-color: #00A250; } +.ansi-green-intense-fg { color: #007427; } +.ansi-green-intense-bg { background-color: #007427; } +.ansi-yellow-fg { color: #DDB62B; } +.ansi-yellow-bg { background-color: #DDB62B; } +.ansi-yellow-intense-fg { color: #B27D12; } +.ansi-yellow-intense-bg { background-color: #B27D12; } +.ansi-blue-fg { color: #208FFB; } +.ansi-blue-bg { background-color: #208FFB; } +.ansi-blue-intense-fg { color: #0065CA; } +.ansi-blue-intense-bg { background-color: #0065CA; } +.ansi-magenta-fg { color: #D160C4; } +.ansi-magenta-bg { background-color: #D160C4; } +.ansi-magenta-intense-fg { color: #A03196; } +.ansi-magenta-intense-bg { background-color: #A03196; } +.ansi-cyan-fg { color: #60C6C8; } +.ansi-cyan-bg { background-color: #60C6C8; } +.ansi-cyan-intense-fg { color: #258F8F; } +.ansi-cyan-intense-bg { background-color: #258F8F; } +.ansi-white-fg { color: #C5C1B4; } +.ansi-white-bg { background-color: #C5C1B4; } +.ansi-white-intense-fg { color: #A1A6B2; } +.ansi-white-intense-bg { background-color: #A1A6B2; } + +.ansi-default-inverse-fg { color: #FFFFFF; } +.ansi-default-inverse-bg { background-color: #000000; } + +.ansi-bold { font-weight: bold; } +.ansi-underline { text-decoration: underline; } + + +div.nbinput.container div.input_area div[class*=highlight] > pre, +div.nboutput.container div.output_area div[class*=highlight] > pre, +div.nboutput.container div.output_area div[class*=highlight].math, +div.nboutput.container div.output_area.rendered_html, +div.nboutput.container div.output_area > div.output_javascript, +div.nboutput.container div.output_area:not(.rendered_html) > img{ + padding: 5px; + margin: 0; +} + +/* fix copybtn overflow problem in chromium (needed for 'sphinx_copybutton') */ +div.nbinput.container div.input_area > div[class^='highlight'], +div.nboutput.container div.output_area > div[class^='highlight']{ + overflow-y: hidden; +} + +/* hide copybtn icon on prompts (needed for 'sphinx_copybutton') */ +.prompt .copybtn { + display: none; +} + +/* Some additional styling taken form the Jupyter notebook CSS */ +.jp-RenderedHTMLCommon table, +div.rendered_html table { + border: none; + border-collapse: collapse; + border-spacing: 0; + color: black; + font-size: 12px; + table-layout: fixed; +} +.jp-RenderedHTMLCommon thead, +div.rendered_html thead { + border-bottom: 1px solid black; + vertical-align: bottom; +} +.jp-RenderedHTMLCommon tr, +.jp-RenderedHTMLCommon th, +.jp-RenderedHTMLCommon td, +div.rendered_html tr, +div.rendered_html th, +div.rendered_html td { + text-align: right; + vertical-align: middle; + padding: 0.5em 0.5em; + line-height: normal; + white-space: normal; + max-width: none; + border: none; +} +.jp-RenderedHTMLCommon th, +div.rendered_html th { + font-weight: bold; +} +.jp-RenderedHTMLCommon tbody tr:nth-child(odd), +div.rendered_html tbody tr:nth-child(odd) { + background: #f5f5f5; +} +.jp-RenderedHTMLCommon tbody tr:hover, +div.rendered_html tbody tr:hover { + background: rgba(66, 165, 245, 0.2); +} + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/nbsphinx-gallery.css b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/nbsphinx-gallery.css new file mode 100644 index 0000000000000000000000000000000000000000..365c27a96b99ecee2ba1130a2262042e1a7a69db --- /dev/null 
+++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/nbsphinx-gallery.css @@ -0,0 +1,31 @@ +.nbsphinx-gallery { + display: grid; + grid-template-columns: repeat(auto-fill, minmax(160px, 1fr)); + gap: 5px; + margin-top: 1em; + margin-bottom: 1em; +} + +.nbsphinx-gallery > a { + padding: 5px; + border: 1px dotted currentColor; + border-radius: 2px; + text-align: center; +} + +.nbsphinx-gallery > a:hover { + border-style: solid; +} + +.nbsphinx-gallery img { + max-width: 100%; + max-height: 100%; +} + +.nbsphinx-gallery > a > div:first-child { + display: flex; + align-items: start; + justify-content: center; + height: 120px; + margin-bottom: 5px; +} diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/nbsphinx-no-thumbnail.svg b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/nbsphinx-no-thumbnail.svg new file mode 100644 index 0000000000000000000000000000000000000000..9dca7588fa52fd8b6a3722f90f8d1d6fcedab3ed --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/nbsphinx-no-thumbnail.svg @@ -0,0 +1,9 @@ + + + + diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/pygments.css b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/pygments.css new file mode 100644 index 0000000000000000000000000000000000000000..2de64167b138950fd5d3a0563d61a02abaaae80b --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/pygments.css @@ -0,0 +1,246 @@ +.highlight pre { line-height: 125%; } +.highlight td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +.highlight span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +.highlight td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +.highlight span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +.highlight .hll { background-color: #ffffcc } +.highlight { background: #eeffcc; } +.highlight .c { color: #408090; font-style: italic } /* Comment */ +.highlight .err { border: 1px solid #FF0000 } /* Error */ +.highlight .k { color: #007020; font-weight: bold } /* Keyword */ +.highlight .o { color: #666666 } /* Operator */ +.highlight .ch { color: #408090; font-style: italic } /* Comment.Hashbang */ +.highlight .cm { color: #408090; font-style: italic } /* Comment.Multiline */ +.highlight .cp { color: #007020 } /* Comment.Preproc */ +.highlight .cpf { color: #408090; font-style: italic } /* Comment.PreprocFile */ +.highlight .c1 { color: #408090; font-style: italic } /* Comment.Single */ +.highlight .cs { color: #408090; background-color: #fff0f0 } /* Comment.Special */ +.highlight .gd { color: #A00000 } /* Generic.Deleted */ +.highlight .ge { font-style: italic } /* Generic.Emph */ +.highlight .gr { color: #FF0000 } /* Generic.Error */ +.highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */ +.highlight .gi { color: #00A000 } /* Generic.Inserted */ +.highlight .go { color: #333333 } /* Generic.Output */ +.highlight .gp { color: #c65d09; font-weight: bold } /* Generic.Prompt */ +.highlight .gs { font-weight: bold } /* Generic.Strong */ +.highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ +.highlight .gt { color: #0044DD } /* Generic.Traceback */ +.highlight .kc { 
color: #007020; font-weight: bold } /* Keyword.Constant */ +.highlight .kd { color: #007020; font-weight: bold } /* Keyword.Declaration */ +.highlight .kn { color: #007020; font-weight: bold } /* Keyword.Namespace */ +.highlight .kp { color: #007020 } /* Keyword.Pseudo */ +.highlight .kr { color: #007020; font-weight: bold } /* Keyword.Reserved */ +.highlight .kt { color: #902000 } /* Keyword.Type */ +.highlight .m { color: #208050 } /* Literal.Number */ +.highlight .s { color: #4070a0 } /* Literal.String */ +.highlight .na { color: #4070a0 } /* Name.Attribute */ +.highlight .nb { color: #007020 } /* Name.Builtin */ +.highlight .nc { color: #0e84b5; font-weight: bold } /* Name.Class */ +.highlight .no { color: #60add5 } /* Name.Constant */ +.highlight .nd { color: #555555; font-weight: bold } /* Name.Decorator */ +.highlight .ni { color: #d55537; font-weight: bold } /* Name.Entity */ +.highlight .ne { color: #007020 } /* Name.Exception */ +.highlight .nf { color: #06287e } /* Name.Function */ +.highlight .nl { color: #002070; font-weight: bold } /* Name.Label */ +.highlight .nn { color: #0e84b5; font-weight: bold } /* Name.Namespace */ +.highlight .nt { color: #062873; font-weight: bold } /* Name.Tag */ +.highlight .nv { color: #bb60d5 } /* Name.Variable */ +.highlight .ow { color: #007020; font-weight: bold } /* Operator.Word */ +.highlight .w { color: #bbbbbb } /* Text.Whitespace */ +.highlight .mb { color: #208050 } /* Literal.Number.Bin */ +.highlight .mf { color: #208050 } /* Literal.Number.Float */ +.highlight .mh { color: #208050 } /* Literal.Number.Hex */ +.highlight .mi { color: #208050 } /* Literal.Number.Integer */ +.highlight .mo { color: #208050 } /* Literal.Number.Oct */ +.highlight .sa { color: #4070a0 } /* Literal.String.Affix */ +.highlight .sb { color: #4070a0 } /* Literal.String.Backtick */ +.highlight .sc { color: #4070a0 } /* Literal.String.Char */ +.highlight .dl { color: #4070a0 } /* Literal.String.Delimiter */ +.highlight .sd { color: #4070a0; font-style: italic } /* Literal.String.Doc */ +.highlight .s2 { color: #4070a0 } /* Literal.String.Double */ +.highlight .se { color: #4070a0; font-weight: bold } /* Literal.String.Escape */ +.highlight .sh { color: #4070a0 } /* Literal.String.Heredoc */ +.highlight .si { color: #70a0d0; font-style: italic } /* Literal.String.Interpol */ +.highlight .sx { color: #c65d09 } /* Literal.String.Other */ +.highlight .sr { color: #235388 } /* Literal.String.Regex */ +.highlight .s1 { color: #4070a0 } /* Literal.String.Single */ +.highlight .ss { color: #517918 } /* Literal.String.Symbol */ +.highlight .bp { color: #007020 } /* Name.Builtin.Pseudo */ +.highlight .fm { color: #06287e } /* Name.Function.Magic */ +.highlight .vc { color: #bb60d5 } /* Name.Variable.Class */ +.highlight .vg { color: #bb60d5 } /* Name.Variable.Global */ +.highlight .vi { color: #bb60d5 } /* Name.Variable.Instance */ +.highlight .vm { color: #bb60d5 } /* Name.Variable.Magic */ +.highlight .il { color: #208050 } /* Literal.Number.Integer.Long */ +@media not print { +body[data-theme="dark"] .highlight pre { line-height: 125%; } +body[data-theme="dark"] .highlight td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +body[data-theme="dark"] .highlight span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +body[data-theme="dark"] .highlight td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } 
+body[data-theme="dark"] .highlight span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +body[data-theme="dark"] .highlight .hll { background-color: #49483e } +body[data-theme="dark"] .highlight { background: #272822; color: #f8f8f2 } +body[data-theme="dark"] .highlight .c { color: #75715e } /* Comment */ +body[data-theme="dark"] .highlight .err { color: #960050; background-color: #1e0010 } /* Error */ +body[data-theme="dark"] .highlight .esc { color: #f8f8f2 } /* Escape */ +body[data-theme="dark"] .highlight .g { color: #f8f8f2 } /* Generic */ +body[data-theme="dark"] .highlight .k { color: #66d9ef } /* Keyword */ +body[data-theme="dark"] .highlight .l { color: #ae81ff } /* Literal */ +body[data-theme="dark"] .highlight .n { color: #f8f8f2 } /* Name */ +body[data-theme="dark"] .highlight .o { color: #f92672 } /* Operator */ +body[data-theme="dark"] .highlight .x { color: #f8f8f2 } /* Other */ +body[data-theme="dark"] .highlight .p { color: #f8f8f2 } /* Punctuation */ +body[data-theme="dark"] .highlight .ch { color: #75715e } /* Comment.Hashbang */ +body[data-theme="dark"] .highlight .cm { color: #75715e } /* Comment.Multiline */ +body[data-theme="dark"] .highlight .cp { color: #75715e } /* Comment.Preproc */ +body[data-theme="dark"] .highlight .cpf { color: #75715e } /* Comment.PreprocFile */ +body[data-theme="dark"] .highlight .c1 { color: #75715e } /* Comment.Single */ +body[data-theme="dark"] .highlight .cs { color: #75715e } /* Comment.Special */ +body[data-theme="dark"] .highlight .gd { color: #f92672 } /* Generic.Deleted */ +body[data-theme="dark"] .highlight .ge { color: #f8f8f2; font-style: italic } /* Generic.Emph */ +body[data-theme="dark"] .highlight .gr { color: #f8f8f2 } /* Generic.Error */ +body[data-theme="dark"] .highlight .gh { color: #f8f8f2 } /* Generic.Heading */ +body[data-theme="dark"] .highlight .gi { color: #a6e22e } /* Generic.Inserted */ +body[data-theme="dark"] .highlight .go { color: #66d9ef } /* Generic.Output */ +body[data-theme="dark"] .highlight .gp { color: #f92672; font-weight: bold } /* Generic.Prompt */ +body[data-theme="dark"] .highlight .gs { color: #f8f8f2; font-weight: bold } /* Generic.Strong */ +body[data-theme="dark"] .highlight .gu { color: #75715e } /* Generic.Subheading */ +body[data-theme="dark"] .highlight .gt { color: #f8f8f2 } /* Generic.Traceback */ +body[data-theme="dark"] .highlight .kc { color: #66d9ef } /* Keyword.Constant */ +body[data-theme="dark"] .highlight .kd { color: #66d9ef } /* Keyword.Declaration */ +body[data-theme="dark"] .highlight .kn { color: #f92672 } /* Keyword.Namespace */ +body[data-theme="dark"] .highlight .kp { color: #66d9ef } /* Keyword.Pseudo */ +body[data-theme="dark"] .highlight .kr { color: #66d9ef } /* Keyword.Reserved */ +body[data-theme="dark"] .highlight .kt { color: #66d9ef } /* Keyword.Type */ +body[data-theme="dark"] .highlight .ld { color: #e6db74 } /* Literal.Date */ +body[data-theme="dark"] .highlight .m { color: #ae81ff } /* Literal.Number */ +body[data-theme="dark"] .highlight .s { color: #e6db74 } /* Literal.String */ +body[data-theme="dark"] .highlight .na { color: #a6e22e } /* Name.Attribute */ +body[data-theme="dark"] .highlight .nb { color: #f8f8f2 } /* Name.Builtin */ +body[data-theme="dark"] .highlight .nc { color: #a6e22e } /* Name.Class */ +body[data-theme="dark"] .highlight .no { color: #66d9ef } /* Name.Constant */ +body[data-theme="dark"] .highlight .nd { color: #a6e22e } /* Name.Decorator */ +body[data-theme="dark"] .highlight 
.ni { color: #f8f8f2 } /* Name.Entity */ +body[data-theme="dark"] .highlight .ne { color: #a6e22e } /* Name.Exception */ +body[data-theme="dark"] .highlight .nf { color: #a6e22e } /* Name.Function */ +body[data-theme="dark"] .highlight .nl { color: #f8f8f2 } /* Name.Label */ +body[data-theme="dark"] .highlight .nn { color: #f8f8f2 } /* Name.Namespace */ +body[data-theme="dark"] .highlight .nx { color: #a6e22e } /* Name.Other */ +body[data-theme="dark"] .highlight .py { color: #f8f8f2 } /* Name.Property */ +body[data-theme="dark"] .highlight .nt { color: #f92672 } /* Name.Tag */ +body[data-theme="dark"] .highlight .nv { color: #f8f8f2 } /* Name.Variable */ +body[data-theme="dark"] .highlight .ow { color: #f92672 } /* Operator.Word */ +body[data-theme="dark"] .highlight .pm { color: #f8f8f2 } /* Punctuation.Marker */ +body[data-theme="dark"] .highlight .w { color: #f8f8f2 } /* Text.Whitespace */ +body[data-theme="dark"] .highlight .mb { color: #ae81ff } /* Literal.Number.Bin */ +body[data-theme="dark"] .highlight .mf { color: #ae81ff } /* Literal.Number.Float */ +body[data-theme="dark"] .highlight .mh { color: #ae81ff } /* Literal.Number.Hex */ +body[data-theme="dark"] .highlight .mi { color: #ae81ff } /* Literal.Number.Integer */ +body[data-theme="dark"] .highlight .mo { color: #ae81ff } /* Literal.Number.Oct */ +body[data-theme="dark"] .highlight .sa { color: #e6db74 } /* Literal.String.Affix */ +body[data-theme="dark"] .highlight .sb { color: #e6db74 } /* Literal.String.Backtick */ +body[data-theme="dark"] .highlight .sc { color: #e6db74 } /* Literal.String.Char */ +body[data-theme="dark"] .highlight .dl { color: #e6db74 } /* Literal.String.Delimiter */ +body[data-theme="dark"] .highlight .sd { color: #e6db74 } /* Literal.String.Doc */ +body[data-theme="dark"] .highlight .s2 { color: #e6db74 } /* Literal.String.Double */ +body[data-theme="dark"] .highlight .se { color: #ae81ff } /* Literal.String.Escape */ +body[data-theme="dark"] .highlight .sh { color: #e6db74 } /* Literal.String.Heredoc */ +body[data-theme="dark"] .highlight .si { color: #e6db74 } /* Literal.String.Interpol */ +body[data-theme="dark"] .highlight .sx { color: #e6db74 } /* Literal.String.Other */ +body[data-theme="dark"] .highlight .sr { color: #e6db74 } /* Literal.String.Regex */ +body[data-theme="dark"] .highlight .s1 { color: #e6db74 } /* Literal.String.Single */ +body[data-theme="dark"] .highlight .ss { color: #e6db74 } /* Literal.String.Symbol */ +body[data-theme="dark"] .highlight .bp { color: #f8f8f2 } /* Name.Builtin.Pseudo */ +body[data-theme="dark"] .highlight .fm { color: #a6e22e } /* Name.Function.Magic */ +body[data-theme="dark"] .highlight .vc { color: #f8f8f2 } /* Name.Variable.Class */ +body[data-theme="dark"] .highlight .vg { color: #f8f8f2 } /* Name.Variable.Global */ +body[data-theme="dark"] .highlight .vi { color: #f8f8f2 } /* Name.Variable.Instance */ +body[data-theme="dark"] .highlight .vm { color: #f8f8f2 } /* Name.Variable.Magic */ +body[data-theme="dark"] .highlight .il { color: #ae81ff } /* Literal.Number.Integer.Long */ +@media (prefers-color-scheme: dark) { +body:not([data-theme="light"]) .highlight pre { line-height: 125%; } +body:not([data-theme="light"]) .highlight td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +body:not([data-theme="light"]) .highlight span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +body:not([data-theme="light"]) .highlight td.linenos .special { color: 
#000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +body:not([data-theme="light"]) .highlight span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +body:not([data-theme="light"]) .highlight .hll { background-color: #49483e } +body:not([data-theme="light"]) .highlight { background: #272822; color: #f8f8f2 } +body:not([data-theme="light"]) .highlight .c { color: #75715e } /* Comment */ +body:not([data-theme="light"]) .highlight .err { color: #960050; background-color: #1e0010 } /* Error */ +body:not([data-theme="light"]) .highlight .esc { color: #f8f8f2 } /* Escape */ +body:not([data-theme="light"]) .highlight .g { color: #f8f8f2 } /* Generic */ +body:not([data-theme="light"]) .highlight .k { color: #66d9ef } /* Keyword */ +body:not([data-theme="light"]) .highlight .l { color: #ae81ff } /* Literal */ +body:not([data-theme="light"]) .highlight .n { color: #f8f8f2 } /* Name */ +body:not([data-theme="light"]) .highlight .o { color: #f92672 } /* Operator */ +body:not([data-theme="light"]) .highlight .x { color: #f8f8f2 } /* Other */ +body:not([data-theme="light"]) .highlight .p { color: #f8f8f2 } /* Punctuation */ +body:not([data-theme="light"]) .highlight .ch { color: #75715e } /* Comment.Hashbang */ +body:not([data-theme="light"]) .highlight .cm { color: #75715e } /* Comment.Multiline */ +body:not([data-theme="light"]) .highlight .cp { color: #75715e } /* Comment.Preproc */ +body:not([data-theme="light"]) .highlight .cpf { color: #75715e } /* Comment.PreprocFile */ +body:not([data-theme="light"]) .highlight .c1 { color: #75715e } /* Comment.Single */ +body:not([data-theme="light"]) .highlight .cs { color: #75715e } /* Comment.Special */ +body:not([data-theme="light"]) .highlight .gd { color: #f92672 } /* Generic.Deleted */ +body:not([data-theme="light"]) .highlight .ge { color: #f8f8f2; font-style: italic } /* Generic.Emph */ +body:not([data-theme="light"]) .highlight .gr { color: #f8f8f2 } /* Generic.Error */ +body:not([data-theme="light"]) .highlight .gh { color: #f8f8f2 } /* Generic.Heading */ +body:not([data-theme="light"]) .highlight .gi { color: #a6e22e } /* Generic.Inserted */ +body:not([data-theme="light"]) .highlight .go { color: #66d9ef } /* Generic.Output */ +body:not([data-theme="light"]) .highlight .gp { color: #f92672; font-weight: bold } /* Generic.Prompt */ +body:not([data-theme="light"]) .highlight .gs { color: #f8f8f2; font-weight: bold } /* Generic.Strong */ +body:not([data-theme="light"]) .highlight .gu { color: #75715e } /* Generic.Subheading */ +body:not([data-theme="light"]) .highlight .gt { color: #f8f8f2 } /* Generic.Traceback */ +body:not([data-theme="light"]) .highlight .kc { color: #66d9ef } /* Keyword.Constant */ +body:not([data-theme="light"]) .highlight .kd { color: #66d9ef } /* Keyword.Declaration */ +body:not([data-theme="light"]) .highlight .kn { color: #f92672 } /* Keyword.Namespace */ +body:not([data-theme="light"]) .highlight .kp { color: #66d9ef } /* Keyword.Pseudo */ +body:not([data-theme="light"]) .highlight .kr { color: #66d9ef } /* Keyword.Reserved */ +body:not([data-theme="light"]) .highlight .kt { color: #66d9ef } /* Keyword.Type */ +body:not([data-theme="light"]) .highlight .ld { color: #e6db74 } /* Literal.Date */ +body:not([data-theme="light"]) .highlight .m { color: #ae81ff } /* Literal.Number */ +body:not([data-theme="light"]) .highlight .s { color: #e6db74 } /* Literal.String */ +body:not([data-theme="light"]) .highlight .na { color: #a6e22e } /* Name.Attribute */ 
+body:not([data-theme="light"]) .highlight .nb { color: #f8f8f2 } /* Name.Builtin */ +body:not([data-theme="light"]) .highlight .nc { color: #a6e22e } /* Name.Class */ +body:not([data-theme="light"]) .highlight .no { color: #66d9ef } /* Name.Constant */ +body:not([data-theme="light"]) .highlight .nd { color: #a6e22e } /* Name.Decorator */ +body:not([data-theme="light"]) .highlight .ni { color: #f8f8f2 } /* Name.Entity */ +body:not([data-theme="light"]) .highlight .ne { color: #a6e22e } /* Name.Exception */ +body:not([data-theme="light"]) .highlight .nf { color: #a6e22e } /* Name.Function */ +body:not([data-theme="light"]) .highlight .nl { color: #f8f8f2 } /* Name.Label */ +body:not([data-theme="light"]) .highlight .nn { color: #f8f8f2 } /* Name.Namespace */ +body:not([data-theme="light"]) .highlight .nx { color: #a6e22e } /* Name.Other */ +body:not([data-theme="light"]) .highlight .py { color: #f8f8f2 } /* Name.Property */ +body:not([data-theme="light"]) .highlight .nt { color: #f92672 } /* Name.Tag */ +body:not([data-theme="light"]) .highlight .nv { color: #f8f8f2 } /* Name.Variable */ +body:not([data-theme="light"]) .highlight .ow { color: #f92672 } /* Operator.Word */ +body:not([data-theme="light"]) .highlight .pm { color: #f8f8f2 } /* Punctuation.Marker */ +body:not([data-theme="light"]) .highlight .w { color: #f8f8f2 } /* Text.Whitespace */ +body:not([data-theme="light"]) .highlight .mb { color: #ae81ff } /* Literal.Number.Bin */ +body:not([data-theme="light"]) .highlight .mf { color: #ae81ff } /* Literal.Number.Float */ +body:not([data-theme="light"]) .highlight .mh { color: #ae81ff } /* Literal.Number.Hex */ +body:not([data-theme="light"]) .highlight .mi { color: #ae81ff } /* Literal.Number.Integer */ +body:not([data-theme="light"]) .highlight .mo { color: #ae81ff } /* Literal.Number.Oct */ +body:not([data-theme="light"]) .highlight .sa { color: #e6db74 } /* Literal.String.Affix */ +body:not([data-theme="light"]) .highlight .sb { color: #e6db74 } /* Literal.String.Backtick */ +body:not([data-theme="light"]) .highlight .sc { color: #e6db74 } /* Literal.String.Char */ +body:not([data-theme="light"]) .highlight .dl { color: #e6db74 } /* Literal.String.Delimiter */ +body:not([data-theme="light"]) .highlight .sd { color: #e6db74 } /* Literal.String.Doc */ +body:not([data-theme="light"]) .highlight .s2 { color: #e6db74 } /* Literal.String.Double */ +body:not([data-theme="light"]) .highlight .se { color: #ae81ff } /* Literal.String.Escape */ +body:not([data-theme="light"]) .highlight .sh { color: #e6db74 } /* Literal.String.Heredoc */ +body:not([data-theme="light"]) .highlight .si { color: #e6db74 } /* Literal.String.Interpol */ +body:not([data-theme="light"]) .highlight .sx { color: #e6db74 } /* Literal.String.Other */ +body:not([data-theme="light"]) .highlight .sr { color: #e6db74 } /* Literal.String.Regex */ +body:not([data-theme="light"]) .highlight .s1 { color: #e6db74 } /* Literal.String.Single */ +body:not([data-theme="light"]) .highlight .ss { color: #e6db74 } /* Literal.String.Symbol */ +body:not([data-theme="light"]) .highlight .bp { color: #f8f8f2 } /* Name.Builtin.Pseudo */ +body:not([data-theme="light"]) .highlight .fm { color: #a6e22e } /* Name.Function.Magic */ +body:not([data-theme="light"]) .highlight .vc { color: #f8f8f2 } /* Name.Variable.Class */ +body:not([data-theme="light"]) .highlight .vg { color: #f8f8f2 } /* Name.Variable.Global */ +body:not([data-theme="light"]) .highlight .vi { color: #f8f8f2 } /* Name.Variable.Instance */ +body:not([data-theme="light"]) 
.highlight .vm { color: #f8f8f2 } /* Name.Variable.Magic */ +body:not([data-theme="light"]) .highlight .il { color: #ae81ff } /* Literal.Number.Integer.Long */ +} +} \ No newline at end of file diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/scripts/furo-extensions.js b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/scripts/furo-extensions.js new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/scripts/furo.js b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/scripts/furo.js new file mode 100644 index 0000000000000000000000000000000000000000..cbf64878d002606657262b895b5592b324714ed0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/scripts/furo.js @@ -0,0 +1,3 @@ +/*! For license information please see furo.js.LICENSE.txt */ +(()=>{var t={212:function(t,e,n){var o,r;r=void 0!==n.g?n.g:"undefined"!=typeof window?window:this,o=function(){return function(t){"use strict";var e={navClass:"active",contentClass:"active",nested:!1,nestedClass:"active",offset:0,reflow:!1,events:!0},n=function(t,e,n){if(n.settings.events){var o=new CustomEvent(t,{bubbles:!0,cancelable:!0,detail:n});e.dispatchEvent(o)}},o=function(t){var e=0;if(t.offsetParent)for(;t;)e+=t.offsetTop,t=t.offsetParent;return e>=0?e:0},r=function(t){t&&t.sort((function(t,e){return o(t.content)=Math.max(document.body.scrollHeight,document.documentElement.scrollHeight,document.body.offsetHeight,document.documentElement.offsetHeight,document.body.clientHeight,document.documentElement.clientHeight)},l=function(t,e){var n=t[t.length-1];if(function(t,e){return!(!s()||!c(t.content,e,!0))}(n,e))return n;for(var o=t.length-1;o>=0;o--)if(c(t[o].content,e))return t[o]},a=function(t,e){if(e.nested&&t.parentNode){var n=t.parentNode.closest("li");n&&(n.classList.remove(e.nestedClass),a(n,e))}},i=function(t,e){if(t){var o=t.nav.closest("li");o&&(o.classList.remove(e.navClass),t.content.classList.remove(e.contentClass),a(o,e),n("gumshoeDeactivate",o,{link:t.nav,content:t.content,settings:e}))}},u=function(t,e){if(e.nested){var n=t.parentNode.closest("li");n&&(n.classList.add(e.nestedClass),u(n,e))}};return function(o,c){var s,a,d,f,m,v={setup:function(){s=document.querySelectorAll(o),a=[],Array.prototype.forEach.call(s,(function(t){var e=document.getElementById(decodeURIComponent(t.hash.substr(1)));e&&a.push({nav:t,content:e})})),r(a)},detect:function(){var t=l(a,m);t?d&&t.content===d.content||(i(d,m),function(t,e){if(t){var o=t.nav.closest("li");o&&(o.classList.add(e.navClass),t.content.classList.add(e.contentClass),u(o,e),n("gumshoeActivate",o,{link:t.nav,content:t.content,settings:e}))}}(t,m),d=t):d&&(i(d,m),d=null)}},h=function(e){f&&t.cancelAnimationFrame(f),f=t.requestAnimationFrame(v.detect)},g=function(e){f&&t.cancelAnimationFrame(f),f=t.requestAnimationFrame((function(){r(a),v.detect()}))};return v.destroy=function(){d&&i(d,m),t.removeEventListener("scroll",h,!1),m.reflow&&t.removeEventListener("resize",g,!1),a=null,s=null,d=null,f=null,m=null},m=function(){var t={};return Array.prototype.forEach.call(arguments,(function(e){for(var n in e){if(!e.hasOwnProperty(n))return;t[n]=e[n]}})),t}(e,c||{}),v.setup(),v.detect(),t.addEventListener("scroll",h,!1),m.reflow&&t.addEventListener("resize",g,!1),v}}(r)}.apply(e,[]),void 
0===o||(t.exports=o)}},e={};function n(o){var r=e[o];if(void 0!==r)return r.exports;var c=e[o]={exports:{}};return t[o].call(c.exports,c,c.exports,n),c.exports}n.n=t=>{var e=t&&t.__esModule?()=>t.default:()=>t;return n.d(e,{a:e}),e},n.d=(t,e)=>{for(var o in e)n.o(e,o)&&!n.o(t,o)&&Object.defineProperty(t,o,{enumerable:!0,get:e[o]})},n.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(t){if("object"==typeof window)return window}}(),n.o=(t,e)=>Object.prototype.hasOwnProperty.call(t,e),(()=>{"use strict";var t=n(212),e=n.n(t),o=null,r=null,c=window.pageYOffset||document.documentElement.scrollTop;function s(){const t=localStorage.getItem("theme")||"auto";var e;"light"!==(e=window.matchMedia("(prefers-color-scheme: dark)").matches?"auto"===t?"light":"light"==t?"dark":"auto":"auto"===t?"dark":"dark"==t?"light":"auto")&&"dark"!==e&&"auto"!==e&&(console.error(`Got invalid theme mode: ${e}. Resetting to auto.`),e="auto"),document.body.dataset.theme=e,localStorage.setItem("theme",e),console.log(`Changed to ${e} mode.`)}function l(){!function(){const t=document.getElementsByClassName("theme-toggle");Array.from(t).forEach((t=>{t.addEventListener("click",s)}))}(),function(){let t=0,e=!1;window.addEventListener("scroll",(function(n){t=window.scrollY,e||(window.requestAnimationFrame((function(){var n;n=t,0==Math.floor(r.getBoundingClientRect().top)?r.classList.add("scrolled"):r.classList.remove("scrolled"),function(t){t<64?document.documentElement.classList.remove("show-back-to-top"):tc&&document.documentElement.classList.remove("show-back-to-top"),c=t}(n),function(t){null!==o&&(0==t?o.scrollTo(0,0):Math.ceil(t)>=Math.floor(document.documentElement.scrollHeight-window.innerHeight)?o.scrollTo(0,o.scrollHeight):document.querySelector(".scroll-current"))}(n),e=!1})),e=!0)})),window.scroll()}(),null!==o&&new(e())(".toc-tree a",{reflow:!0,recursive:!0,navClass:"scroll-current",offset:()=>{let t=parseFloat(getComputedStyle(document.documentElement).fontSize);return r.getBoundingClientRect().height+.5*t+1}})}document.addEventListener("DOMContentLoaded",(function(){document.body.parentNode.classList.remove("no-js"),r=document.querySelector("header"),o=document.querySelector(".toc-scroll"),l()}))})()})(); +//# sourceMappingURL=furo.js.map \ No newline at end of file diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/scripts/furo.js.LICENSE.txt b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/scripts/furo.js.LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..1632189c7e0cd1a17e452d2d0b24dfa741c7ea46 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/scripts/furo.js.LICENSE.txt @@ -0,0 +1,7 @@ +/*! + * gumshoejs v5.1.2 (patched by @pradyunsg) + * A simple, framework-agnostic scrollspy script. 
+ * (c) 2019 Chris Ferdinandi + * MIT License + * http://github.com/cferdinandi/gumshoe + */ diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/scripts/furo.js.map b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/scripts/furo.js.map new file mode 100644 index 0000000000000000000000000000000000000000..7ed2be87dcf113b9f4c1be32b92aae905f5bb488 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/scripts/furo.js.map @@ -0,0 +1 @@ +{"version":3,"file":"scripts/furo.js","mappings":";iCAAA,MAQWA,SAWS,IAAX,EAAAC,EACH,EAAAA,EACkB,oBAAXC,OACPA,OACAC,KAbS,EAAF,WACP,OAaJ,SAAUD,GACR,aAMA,IAAIE,EAAW,CAEbC,SAAU,SACVC,aAAc,SAGdC,QAAQ,EACRC,YAAa,SAGbC,OAAQ,EACRC,QAAQ,EAGRC,QAAQ,GA6BNC,EAAY,SAAUC,EAAMC,EAAMC,GAEpC,GAAKA,EAAOC,SAASL,OAArB,CAGA,IAAIM,EAAQ,IAAIC,YAAYL,EAAM,CAChCM,SAAS,EACTC,YAAY,EACZL,OAAQA,IAIVD,EAAKO,cAAcJ,KAQjBK,EAAe,SAAUR,GAC3B,IAAIS,EAAW,EACf,GAAIT,EAAKU,aACP,KAAOV,GACLS,GAAYT,EAAKW,UACjBX,EAAOA,EAAKU,aAGhB,OAAOD,GAAY,EAAIA,EAAW,GAOhCG,EAAe,SAAUC,GACvBA,GACFA,EAASC,MAAK,SAAUC,EAAOC,GAG7B,OAFcR,EAAaO,EAAME,SACnBT,EAAaQ,EAAMC,UACF,EACxB,MA2CTC,EAAW,SAAUlB,EAAME,EAAUiB,GACvC,IAAIC,EAASpB,EAAKqB,wBACd1B,EAnCU,SAAUO,GAExB,MAA+B,mBAApBA,EAASP,OACX2B,WAAWpB,EAASP,UAItB2B,WAAWpB,EAASP,QA4Bd4B,CAAUrB,GACvB,OAAIiB,EAEAK,SAASJ,EAAOD,OAAQ,KACvB/B,EAAOqC,aAAeC,SAASC,gBAAgBC,cAG7CJ,SAASJ,EAAOS,IAAK,KAAOlC,GAOjCmC,EAAa,WACf,OACEC,KAAKC,KAAK5C,EAAOqC,YAAcrC,EAAO6C,cAnCjCF,KAAKG,IACVR,SAASS,KAAKC,aACdV,SAASC,gBAAgBS,aACzBV,SAASS,KAAKE,aACdX,SAASC,gBAAgBU,aACzBX,SAASS,KAAKP,aACdF,SAASC,gBAAgBC,eAqDzBU,EAAY,SAAUzB,EAAUX,GAClC,IAAIqC,EAAO1B,EAASA,EAAS2B,OAAS,GACtC,GAbgB,SAAUC,EAAMvC,GAChC,SAAI4B,MAAgBZ,EAASuB,EAAKxB,QAASf,GAAU,IAYjDwC,CAAYH,EAAMrC,GAAW,OAAOqC,EACxC,IAAK,IAAII,EAAI9B,EAAS2B,OAAS,EAAGG,GAAK,EAAGA,IACxC,GAAIzB,EAASL,EAAS8B,GAAG1B,QAASf,GAAW,OAAOW,EAAS8B,IAS7DC,EAAmB,SAAUC,EAAK3C,GAEpC,GAAKA,EAAST,QAAWoD,EAAIC,WAA7B,CAGA,IAAIC,EAAKF,EAAIC,WAAWE,QAAQ,MAC3BD,IAGLA,EAAGE,UAAUC,OAAOhD,EAASR,aAG7BkD,EAAiBG,EAAI7C,MAQnBiD,EAAa,SAAUC,EAAOlD,GAEhC,GAAKkD,EAAL,CAGA,IAAIL,EAAKK,EAAMP,IAAIG,QAAQ,MACtBD,IAGLA,EAAGE,UAAUC,OAAOhD,EAASX,UAC7B6D,EAAMnC,QAAQgC,UAAUC,OAAOhD,EAASV,cAGxCoD,EAAiBG,EAAI7C,GAGrBJ,EAAU,oBAAqBiD,EAAI,CACjCM,KAAMD,EAAMP,IACZ5B,QAASmC,EAAMnC,QACff,SAAUA,OASVoD,EAAiB,SAAUT,EAAK3C,GAElC,GAAKA,EAAST,OAAd,CAGA,IAAIsD,EAAKF,EAAIC,WAAWE,QAAQ,MAC3BD,IAGLA,EAAGE,UAAUM,IAAIrD,EAASR,aAG1B4D,EAAeP,EAAI7C,MA8LrB,OA1JkB,SAAUsD,EAAUC,GAKpC,IACIC,EAAU7C,EAAU8C,EAASC,EAAS1D,EADtC2D,EAAa,CAUjBA,MAAmB,WAEjBH,EAAWhC,SAASoC,iBAAiBN,GAGrC3C,EAAW,GAGXkD,MAAMC,UAAUC,QAAQC,KAAKR,GAAU,SAAUjB,GAE/C,IAAIxB,EAAUS,SAASyC,eACrBC,mBAAmB3B,EAAK4B,KAAKC,OAAO,KAEjCrD,GAGLJ,EAAS0D,KAAK,CACZ1B,IAAKJ,EACLxB,QAASA,OAKbL,EAAaC,IAMfgD,OAAoB,WAElB,IAAIW,EAASlC,EAAUzB,EAAUX,GAG5BsE,EASDb,GAAWa,EAAOvD,UAAY0C,EAAQ1C,UAG1CkC,EAAWQ,EAASzD,GAzFT,SAAUkD,EAAOlD,GAE9B,GAAKkD,EAAL,CAGA,IAAIL,EAAKK,EAAMP,IAAIG,QAAQ,MACtBD,IAGLA,EAAGE,UAAUM,IAAIrD,EAASX,UAC1B6D,EAAMnC,QAAQgC,UAAUM,IAAIrD,EAASV,cAGrC8D,EAAeP,EAAI7C,GAGnBJ,EAAU,kBAAmBiD,EAAI,CAC/BM,KAAMD,EAAMP,IACZ5B,QAASmC,EAAMnC,QACff,SAAUA,MAuEVuE,CAASD,EAAQtE,GAGjByD,EAAUa,GAfJb,IACFR,EAAWQ,EAASzD,GACpByD,EAAU,QAoBZe,EAAgB,SAAUvE,GAExByD,GACFxE,EAAOuF,qBAAqBf,GAI9BA,EAAUxE,EAAOwF,sBAAsBf,EAAWgB,SAOhDC,EAAgB,SAAU3E,GAExByD,GACFxE,EAAOuF,qBAAqBf,GAI9BA,EAAUxE,EAAOwF,uBAAsB,WACrChE,EAAaC,GACbgD,EAAWgB,aAoDf,OA7CAhB,EAAWkB,QAAU,WAEfpB,GACFR,EAAWQ,EAASzD,GAItBd,EAAO4F,oBAAoB,SAAUN,GAAe,GAChDxE,EAASN,QACXR,EAAO4F,oBAAoB,SAAUF,GAAe,GAItDjE,EAAW,KACX6C,EAAW,KACXC,EAAU,KACVC,EAAU,K
ACV1D,EAAW,MAQXA,EA3XS,WACX,IAAI+E,EAAS,GAOb,OANAlB,MAAMC,UAAUC,QAAQC,KAAKgB,WAAW,SAAUC,GAChD,IAAK,IAAIC,KAAOD,EAAK,CACnB,IAAKA,EAAIE,eAAeD,GAAM,OAC9BH,EAAOG,GAAOD,EAAIC,OAGfH,EAmXMK,CAAOhG,EAAUmE,GAAW,IAGvCI,EAAW0B,QAGX1B,EAAWgB,SAGXzF,EAAOoG,iBAAiB,SAAUd,GAAe,GAC7CxE,EAASN,QACXR,EAAOoG,iBAAiB,SAAUV,GAAe,GAS9CjB,GA7bA4B,CAAQvG,IAChB,QAFM,SAEN,uBCXDwG,EAA2B,GAG/B,SAASC,EAAoBC,GAE5B,IAAIC,EAAeH,EAAyBE,GAC5C,QAAqBE,IAAjBD,EACH,OAAOA,EAAaE,QAGrB,IAAIC,EAASN,EAAyBE,GAAY,CAGjDG,QAAS,IAOV,OAHAE,EAAoBL,GAAU1B,KAAK8B,EAAOD,QAASC,EAAQA,EAAOD,QAASJ,GAGpEK,EAAOD,QCpBfJ,EAAoBO,EAAKF,IACxB,IAAIG,EAASH,GAAUA,EAAOI,WAC7B,IAAOJ,EAAiB,QACxB,IAAM,EAEP,OADAL,EAAoBU,EAAEF,EAAQ,CAAEG,EAAGH,IAC5BA,GCLRR,EAAoBU,EAAI,CAACN,EAASQ,KACjC,IAAI,IAAInB,KAAOmB,EACXZ,EAAoBa,EAAED,EAAYnB,KAASO,EAAoBa,EAAET,EAASX,IAC5EqB,OAAOC,eAAeX,EAASX,EAAK,CAAEuB,YAAY,EAAMC,IAAKL,EAAWnB,MCJ3EO,EAAoBxG,EAAI,WACvB,GAA0B,iBAAf0H,WAAyB,OAAOA,WAC3C,IACC,OAAOxH,MAAQ,IAAIyH,SAAS,cAAb,GACd,MAAOC,GACR,GAAsB,iBAAX3H,OAAqB,OAAOA,QALjB,GCAxBuG,EAAoBa,EAAI,CAACrB,EAAK6B,IAAUP,OAAOzC,UAAUqB,eAAenB,KAAKiB,EAAK6B,4CCK9EC,EAAY,KACZC,EAAS,KACTC,EAAgB/H,OAAO6C,aAAeP,SAASC,gBAAgByF,UA4EnE,SAASC,IACP,MAAMC,EAAeC,aAAaC,QAAQ,UAAY,OAZxD,IAAkBC,EACH,WADGA,EAaIrI,OAAOsI,WAAW,gCAAgCC,QAI/C,SAAjBL,EACO,QACgB,SAAhBA,EACA,OAEA,OAIU,SAAjBA,EACO,OACgB,QAAhBA,EACA,QAEA,SA9BoB,SAATG,GAA4B,SAATA,IACzCG,QAAQC,MAAM,2BAA2BJ,yBACzCA,EAAO,QAGT/F,SAASS,KAAK2F,QAAQC,MAAQN,EAC9BF,aAAaS,QAAQ,QAASP,GAC9BG,QAAQK,IAAI,cAAcR,WA4E5B,SAASlC,KART,WAEE,MAAM2C,EAAUxG,SAASyG,uBAAuB,gBAChDpE,MAAMqE,KAAKF,GAASjE,SAASoE,IAC3BA,EAAI7C,iBAAiB,QAAS6B,MAKhCiB,GA9CF,WAEE,IAAIC,EAA6B,EAC7BC,GAAU,EAEdpJ,OAAOoG,iBAAiB,UAAU,SAAUuB,GAC1CwB,EAA6BnJ,OAAOqJ,QAE/BD,IACHpJ,OAAOwF,uBAAsB,WAzDnC,IAAuB8D,IA0DDH,EA9GkC,GAAlDxG,KAAK4G,MAAMzB,EAAO7F,wBAAwBQ,KAC5CqF,EAAOjE,UAAUM,IAAI,YAErB2D,EAAOjE,UAAUC,OAAO,YAI5B,SAAmCwF,GAC7BA,EAXmB,GAYrBhH,SAASC,gBAAgBsB,UAAUC,OAAO,oBAEtCwF,EAAYvB,EACdzF,SAASC,gBAAgBsB,UAAUM,IAAI,oBAC9BmF,EAAYvB,GACrBzF,SAASC,gBAAgBsB,UAAUC,OAAO,oBAG9CiE,EAAgBuB,EAqChBE,CAA0BF,GAlC5B,SAA6BA,GACT,OAAdzB,IAKa,GAAbyB,EACFzB,EAAU4B,SAAS,EAAG,GAGtB9G,KAAKC,KAAK0G,IACV3G,KAAK4G,MAAMjH,SAASC,gBAAgBS,aAAehD,OAAOqC,aAE1DwF,EAAU4B,SAAS,EAAG5B,EAAU7E,cAGhBV,SAASoH,cAAc,oBAmBzCC,CAAoBL,GAwDdF,GAAU,KAGZA,GAAU,MAGdpJ,OAAO4J,SA8BPC,GA1BkB,OAAdhC,GAKJ,IAAI,IAAJ,CAAY,cAAe,CACzBrH,QAAQ,EACRsJ,WAAW,EACX3J,SAAU,iBACVI,OAAQ,KACN,IAAIwJ,EAAM7H,WAAW8H,iBAAiB1H,SAASC,iBAAiB0H,UAChE,OAAOnC,EAAO7F,wBAAwBiI,OAAS,GAAMH,EAAM,KA+BjEzH,SAAS8D,iBAAiB,oBAT1B,WACE9D,SAASS,KAAKW,WAAWG,UAAUC,OAAO,SAE1CgE,EAASxF,SAASoH,cAAc,UAChC7B,EAAYvF,SAASoH,cAAc,eAEnCvD","sources":["webpack:///./src/furo/assets/scripts/gumshoe-patched.js","webpack:///webpack/bootstrap","webpack:///webpack/runtime/compat get default export","webpack:///webpack/runtime/define property getters","webpack:///webpack/runtime/global","webpack:///webpack/runtime/hasOwnProperty shorthand","webpack:///./src/furo/assets/scripts/furo.js"],"sourcesContent":["/*!\n * gumshoejs v5.1.2 (patched by @pradyunsg)\n * A simple, framework-agnostic scrollspy script.\n * (c) 2019 Chris Ferdinandi\n * MIT License\n * http://github.com/cferdinandi/gumshoe\n */\n\n(function (root, factory) {\n if (typeof define === \"function\" && define.amd) {\n define([], function () {\n return factory(root);\n });\n } else if (typeof exports === \"object\") {\n module.exports = factory(root);\n } else {\n root.Gumshoe = factory(root);\n }\n})(\n typeof global !== \"undefined\"\n ? global\n : typeof window !== \"undefined\"\n ? 
window\n : this,\n function (window) {\n \"use strict\";\n\n //\n // Defaults\n //\n\n var defaults = {\n // Active classes\n navClass: \"active\",\n contentClass: \"active\",\n\n // Nested navigation\n nested: false,\n nestedClass: \"active\",\n\n // Offset & reflow\n offset: 0,\n reflow: false,\n\n // Event support\n events: true,\n };\n\n //\n // Methods\n //\n\n /**\n * Merge two or more objects together.\n * @param {Object} objects The objects to merge together\n * @returns {Object} Merged values of defaults and options\n */\n var extend = function () {\n var merged = {};\n Array.prototype.forEach.call(arguments, function (obj) {\n for (var key in obj) {\n if (!obj.hasOwnProperty(key)) return;\n merged[key] = obj[key];\n }\n });\n return merged;\n };\n\n /**\n * Emit a custom event\n * @param {String} type The event type\n * @param {Node} elem The element to attach the event to\n * @param {Object} detail Any details to pass along with the event\n */\n var emitEvent = function (type, elem, detail) {\n // Make sure events are enabled\n if (!detail.settings.events) return;\n\n // Create a new event\n var event = new CustomEvent(type, {\n bubbles: true,\n cancelable: true,\n detail: detail,\n });\n\n // Dispatch the event\n elem.dispatchEvent(event);\n };\n\n /**\n * Get an element's distance from the top of the Document.\n * @param {Node} elem The element\n * @return {Number} Distance from the top in pixels\n */\n var getOffsetTop = function (elem) {\n var location = 0;\n if (elem.offsetParent) {\n while (elem) {\n location += elem.offsetTop;\n elem = elem.offsetParent;\n }\n }\n return location >= 0 ? location : 0;\n };\n\n /**\n * Sort content from first to last in the DOM\n * @param {Array} contents The content areas\n */\n var sortContents = function (contents) {\n if (contents) {\n contents.sort(function (item1, item2) {\n var offset1 = getOffsetTop(item1.content);\n var offset2 = getOffsetTop(item2.content);\n if (offset1 < offset2) return -1;\n return 1;\n });\n }\n };\n\n /**\n * Get the offset to use for calculating position\n * @param {Object} settings The settings for this instantiation\n * @return {Float} The number of pixels to offset the calculations\n */\n var getOffset = function (settings) {\n // if the offset is a function run it\n if (typeof settings.offset === \"function\") {\n return parseFloat(settings.offset());\n }\n\n // Otherwise, return it as-is\n return parseFloat(settings.offset);\n };\n\n /**\n * Get the document element's height\n * @private\n * @returns {Number}\n */\n var getDocumentHeight = function () {\n return Math.max(\n document.body.scrollHeight,\n document.documentElement.scrollHeight,\n document.body.offsetHeight,\n document.documentElement.offsetHeight,\n document.body.clientHeight,\n document.documentElement.clientHeight,\n );\n };\n\n /**\n * Determine if an element is in view\n * @param {Node} elem The element\n * @param {Object} settings The settings for this instantiation\n * @param {Boolean} bottom If true, check if element is above bottom of viewport instead\n * @return {Boolean} Returns true if element is in the viewport\n */\n var isInView = function (elem, settings, bottom) {\n var bounds = elem.getBoundingClientRect();\n var offset = getOffset(settings);\n if (bottom) {\n return (\n parseInt(bounds.bottom, 10) <\n (window.innerHeight || document.documentElement.clientHeight)\n );\n }\n return parseInt(bounds.top, 10) <= offset;\n };\n\n /**\n * Check if at the bottom of the viewport\n * @return {Boolean} If true, page is at the bottom 
of the viewport\n */\n var isAtBottom = function () {\n if (\n Math.ceil(window.innerHeight + window.pageYOffset) >=\n getDocumentHeight()\n )\n return true;\n return false;\n };\n\n /**\n * Check if the last item should be used (even if not at the top of the page)\n * @param {Object} item The last item\n * @param {Object} settings The settings for this instantiation\n * @return {Boolean} If true, use the last item\n */\n var useLastItem = function (item, settings) {\n if (isAtBottom() && isInView(item.content, settings, true)) return true;\n return false;\n };\n\n /**\n * Get the active content\n * @param {Array} contents The content areas\n * @param {Object} settings The settings for this instantiation\n * @return {Object} The content area and matching navigation link\n */\n var getActive = function (contents, settings) {\n var last = contents[contents.length - 1];\n if (useLastItem(last, settings)) return last;\n for (var i = contents.length - 1; i >= 0; i--) {\n if (isInView(contents[i].content, settings)) return contents[i];\n }\n };\n\n /**\n * Deactivate parent navs in a nested navigation\n * @param {Node} nav The starting navigation element\n * @param {Object} settings The settings for this instantiation\n */\n var deactivateNested = function (nav, settings) {\n // If nesting isn't activated, bail\n if (!settings.nested || !nav.parentNode) return;\n\n // Get the parent navigation\n var li = nav.parentNode.closest(\"li\");\n if (!li) return;\n\n // Remove the active class\n li.classList.remove(settings.nestedClass);\n\n // Apply recursively to any parent navigation elements\n deactivateNested(li, settings);\n };\n\n /**\n * Deactivate a nav and content area\n * @param {Object} items The nav item and content to deactivate\n * @param {Object} settings The settings for this instantiation\n */\n var deactivate = function (items, settings) {\n // Make sure there are items to deactivate\n if (!items) return;\n\n // Get the parent list item\n var li = items.nav.closest(\"li\");\n if (!li) return;\n\n // Remove the active class from the nav and content\n li.classList.remove(settings.navClass);\n items.content.classList.remove(settings.contentClass);\n\n // Deactivate any parent navs in a nested navigation\n deactivateNested(li, settings);\n\n // Emit a custom event\n emitEvent(\"gumshoeDeactivate\", li, {\n link: items.nav,\n content: items.content,\n settings: settings,\n });\n };\n\n /**\n * Activate parent navs in a nested navigation\n * @param {Node} nav The starting navigation element\n * @param {Object} settings The settings for this instantiation\n */\n var activateNested = function (nav, settings) {\n // If nesting isn't activated, bail\n if (!settings.nested) return;\n\n // Get the parent navigation\n var li = nav.parentNode.closest(\"li\");\n if (!li) return;\n\n // Add the active class\n li.classList.add(settings.nestedClass);\n\n // Apply recursively to any parent navigation elements\n activateNested(li, settings);\n };\n\n /**\n * Activate a nav and content area\n * @param {Object} items The nav item and content to activate\n * @param {Object} settings The settings for this instantiation\n */\n var activate = function (items, settings) {\n // Make sure there are items to activate\n if (!items) return;\n\n // Get the parent list item\n var li = items.nav.closest(\"li\");\n if (!li) return;\n\n // Add the active class to the nav and content\n li.classList.add(settings.navClass);\n items.content.classList.add(settings.contentClass);\n\n // Activate any parent navs in a nested 
navigation\n activateNested(li, settings);\n\n // Emit a custom event\n emitEvent(\"gumshoeActivate\", li, {\n link: items.nav,\n content: items.content,\n settings: settings,\n });\n };\n\n /**\n * Create the Constructor object\n * @param {String} selector The selector to use for navigation items\n * @param {Object} options User options and settings\n */\n var Constructor = function (selector, options) {\n //\n // Variables\n //\n\n var publicAPIs = {};\n var navItems, contents, current, timeout, settings;\n\n //\n // Methods\n //\n\n /**\n * Set variables from DOM elements\n */\n publicAPIs.setup = function () {\n // Get all nav items\n navItems = document.querySelectorAll(selector);\n\n // Create contents array\n contents = [];\n\n // Loop through each item, get it's matching content, and push to the array\n Array.prototype.forEach.call(navItems, function (item) {\n // Get the content for the nav item\n var content = document.getElementById(\n decodeURIComponent(item.hash.substr(1)),\n );\n if (!content) return;\n\n // Push to the contents array\n contents.push({\n nav: item,\n content: content,\n });\n });\n\n // Sort contents by the order they appear in the DOM\n sortContents(contents);\n };\n\n /**\n * Detect which content is currently active\n */\n publicAPIs.detect = function () {\n // Get the active content\n var active = getActive(contents, settings);\n\n // if there's no active content, deactivate and bail\n if (!active) {\n if (current) {\n deactivate(current, settings);\n current = null;\n }\n return;\n }\n\n // If the active content is the one currently active, do nothing\n if (current && active.content === current.content) return;\n\n // Deactivate the current content and activate the new content\n deactivate(current, settings);\n activate(active, settings);\n\n // Update the currently active content\n current = active;\n };\n\n /**\n * Detect the active content on scroll\n * Debounced for performance\n */\n var scrollHandler = function (event) {\n // If there's a timer, cancel it\n if (timeout) {\n window.cancelAnimationFrame(timeout);\n }\n\n // Setup debounce callback\n timeout = window.requestAnimationFrame(publicAPIs.detect);\n };\n\n /**\n * Update content sorting on resize\n * Debounced for performance\n */\n var resizeHandler = function (event) {\n // If there's a timer, cancel it\n if (timeout) {\n window.cancelAnimationFrame(timeout);\n }\n\n // Setup debounce callback\n timeout = window.requestAnimationFrame(function () {\n sortContents(contents);\n publicAPIs.detect();\n });\n };\n\n /**\n * Destroy the current instantiation\n */\n publicAPIs.destroy = function () {\n // Undo DOM changes\n if (current) {\n deactivate(current, settings);\n }\n\n // Remove event listeners\n window.removeEventListener(\"scroll\", scrollHandler, false);\n if (settings.reflow) {\n window.removeEventListener(\"resize\", resizeHandler, false);\n }\n\n // Reset variables\n contents = null;\n navItems = null;\n current = null;\n timeout = null;\n settings = null;\n };\n\n /**\n * Initialize the current instantiation\n */\n var init = function () {\n // Merge user options into defaults\n settings = extend(defaults, options || {});\n\n // Setup variables based on the current DOM\n publicAPIs.setup();\n\n // Find the currently active content\n publicAPIs.detect();\n\n // Setup event listeners\n window.addEventListener(\"scroll\", scrollHandler, false);\n if (settings.reflow) {\n window.addEventListener(\"resize\", resizeHandler, false);\n }\n };\n\n //\n // Initialize and return the public 
APIs\n //\n\n init();\n return publicAPIs;\n };\n\n //\n // Return the Constructor\n //\n\n return Constructor;\n },\n);\n","// The module cache\nvar __webpack_module_cache__ = {};\n\n// The require function\nfunction __webpack_require__(moduleId) {\n\t// Check if module is in cache\n\tvar cachedModule = __webpack_module_cache__[moduleId];\n\tif (cachedModule !== undefined) {\n\t\treturn cachedModule.exports;\n\t}\n\t// Create a new module (and put it into the cache)\n\tvar module = __webpack_module_cache__[moduleId] = {\n\t\t// no module.id needed\n\t\t// no module.loaded needed\n\t\texports: {}\n\t};\n\n\t// Execute the module function\n\t__webpack_modules__[moduleId].call(module.exports, module, module.exports, __webpack_require__);\n\n\t// Return the exports of the module\n\treturn module.exports;\n}\n\n","// getDefaultExport function for compatibility with non-harmony modules\n__webpack_require__.n = (module) => {\n\tvar getter = module && module.__esModule ?\n\t\t() => (module['default']) :\n\t\t() => (module);\n\t__webpack_require__.d(getter, { a: getter });\n\treturn getter;\n};","// define getter functions for harmony exports\n__webpack_require__.d = (exports, definition) => {\n\tfor(var key in definition) {\n\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n\t\t}\n\t}\n};","__webpack_require__.g = (function() {\n\tif (typeof globalThis === 'object') return globalThis;\n\ttry {\n\t\treturn this || new Function('return this')();\n\t} catch (e) {\n\t\tif (typeof window === 'object') return window;\n\t}\n})();","__webpack_require__.o = (obj, prop) => (Object.prototype.hasOwnProperty.call(obj, prop))","import Gumshoe from \"./gumshoe-patched.js\";\n\n////////////////////////////////////////////////////////////////////////////////\n// Scroll Handling\n////////////////////////////////////////////////////////////////////////////////\nvar tocScroll = null;\nvar header = null;\nvar lastScrollTop = window.pageYOffset || document.documentElement.scrollTop;\nconst GO_TO_TOP_OFFSET = 64;\n\nfunction scrollHandlerForHeader() {\n if (Math.floor(header.getBoundingClientRect().top) == 0) {\n header.classList.add(\"scrolled\");\n } else {\n header.classList.remove(\"scrolled\");\n }\n}\n\nfunction scrollHandlerForBackToTop(positionY) {\n if (positionY < GO_TO_TOP_OFFSET) {\n document.documentElement.classList.remove(\"show-back-to-top\");\n } else {\n if (positionY < lastScrollTop) {\n document.documentElement.classList.add(\"show-back-to-top\");\n } else if (positionY > lastScrollTop) {\n document.documentElement.classList.remove(\"show-back-to-top\");\n }\n }\n lastScrollTop = positionY;\n}\n\nfunction scrollHandlerForTOC(positionY) {\n if (tocScroll === null) {\n return;\n }\n\n // top of page.\n if (positionY == 0) {\n tocScroll.scrollTo(0, 0);\n } else if (\n // bottom of page.\n Math.ceil(positionY) >=\n Math.floor(document.documentElement.scrollHeight - window.innerHeight)\n ) {\n tocScroll.scrollTo(0, tocScroll.scrollHeight);\n } else {\n // somewhere in the middle.\n const current = document.querySelector(\".scroll-current\");\n if (current == null) {\n return;\n }\n\n // https://github.com/pypa/pip/issues/9159 This breaks scroll behaviours.\n // // scroll the currently \"active\" heading in toc, into view.\n // const rect = current.getBoundingClientRect();\n // if (0 > rect.top) {\n // current.scrollIntoView(true); // the argument is \"alignTop\"\n // } else if 
(rect.bottom > window.innerHeight) {\n // current.scrollIntoView(false);\n // }\n }\n}\n\nfunction scrollHandler(positionY) {\n scrollHandlerForHeader();\n scrollHandlerForBackToTop(positionY);\n scrollHandlerForTOC(positionY);\n}\n\n////////////////////////////////////////////////////////////////////////////////\n// Theme Toggle\n////////////////////////////////////////////////////////////////////////////////\nfunction setTheme(mode) {\n if (mode !== \"light\" && mode !== \"dark\" && mode !== \"auto\") {\n console.error(`Got invalid theme mode: ${mode}. Resetting to auto.`);\n mode = \"auto\";\n }\n\n document.body.dataset.theme = mode;\n localStorage.setItem(\"theme\", mode);\n console.log(`Changed to ${mode} mode.`);\n}\n\nfunction cycleThemeOnce() {\n const currentTheme = localStorage.getItem(\"theme\") || \"auto\";\n const prefersDark = window.matchMedia(\"(prefers-color-scheme: dark)\").matches;\n\n if (prefersDark) {\n // Auto (dark) -> Light -> Dark\n if (currentTheme === \"auto\") {\n setTheme(\"light\");\n } else if (currentTheme == \"light\") {\n setTheme(\"dark\");\n } else {\n setTheme(\"auto\");\n }\n } else {\n // Auto (light) -> Dark -> Light\n if (currentTheme === \"auto\") {\n setTheme(\"dark\");\n } else if (currentTheme == \"dark\") {\n setTheme(\"light\");\n } else {\n setTheme(\"auto\");\n }\n }\n}\n\n////////////////////////////////////////////////////////////////////////////////\n// Setup\n////////////////////////////////////////////////////////////////////////////////\nfunction setupScrollHandler() {\n // Taken from https://developer.mozilla.org/en-US/docs/Web/API/Document/scroll_event\n let last_known_scroll_position = 0;\n let ticking = false;\n\n window.addEventListener(\"scroll\", function (e) {\n last_known_scroll_position = window.scrollY;\n\n if (!ticking) {\n window.requestAnimationFrame(function () {\n scrollHandler(last_known_scroll_position);\n ticking = false;\n });\n\n ticking = true;\n }\n });\n window.scroll();\n}\n\nfunction setupScrollSpy() {\n if (tocScroll === null) {\n return;\n }\n\n // Scrollspy -- highlight table on contents, based on scroll\n new Gumshoe(\".toc-tree a\", {\n reflow: true,\n recursive: true,\n navClass: \"scroll-current\",\n offset: () => {\n let rem = parseFloat(getComputedStyle(document.documentElement).fontSize);\n return header.getBoundingClientRect().height + 0.5 * rem + 1;\n },\n });\n}\n\nfunction setupTheme() {\n // Attach event handlers for toggling themes\n const buttons = document.getElementsByClassName(\"theme-toggle\");\n Array.from(buttons).forEach((btn) => {\n btn.addEventListener(\"click\", cycleThemeOnce);\n });\n}\n\nfunction setup() {\n setupTheme();\n setupScrollHandler();\n setupScrollSpy();\n}\n\n////////////////////////////////////////////////////////////////////////////////\n// Main entrypoint\n////////////////////////////////////////////////////////////////////////////////\nfunction main() {\n document.body.parentNode.classList.remove(\"no-js\");\n\n header = document.querySelector(\"header\");\n tocScroll = document.querySelector(\".toc-scroll\");\n\n setup();\n}\n\ndocument.addEventListener(\"DOMContentLoaded\", 
main);\n"],"names":["root","g","window","this","defaults","navClass","contentClass","nested","nestedClass","offset","reflow","events","emitEvent","type","elem","detail","settings","event","CustomEvent","bubbles","cancelable","dispatchEvent","getOffsetTop","location","offsetParent","offsetTop","sortContents","contents","sort","item1","item2","content","isInView","bottom","bounds","getBoundingClientRect","parseFloat","getOffset","parseInt","innerHeight","document","documentElement","clientHeight","top","isAtBottom","Math","ceil","pageYOffset","max","body","scrollHeight","offsetHeight","getActive","last","length","item","useLastItem","i","deactivateNested","nav","parentNode","li","closest","classList","remove","deactivate","items","link","activateNested","add","selector","options","navItems","current","timeout","publicAPIs","querySelectorAll","Array","prototype","forEach","call","getElementById","decodeURIComponent","hash","substr","push","active","activate","scrollHandler","cancelAnimationFrame","requestAnimationFrame","detect","resizeHandler","destroy","removeEventListener","merged","arguments","obj","key","hasOwnProperty","extend","setup","addEventListener","factory","__webpack_module_cache__","__webpack_require__","moduleId","cachedModule","undefined","exports","module","__webpack_modules__","n","getter","__esModule","d","a","definition","o","Object","defineProperty","enumerable","get","globalThis","Function","e","prop","tocScroll","header","lastScrollTop","scrollTop","cycleThemeOnce","currentTheme","localStorage","getItem","mode","matchMedia","matches","console","error","dataset","theme","setItem","log","buttons","getElementsByClassName","from","btn","setupTheme","last_known_scroll_position","ticking","scrollY","positionY","floor","scrollHandlerForBackToTop","scrollTo","querySelector","scrollHandlerForTOC","scroll","setupScrollHandler","recursive","rem","getComputedStyle","fontSize","height"],"sourceRoot":""} \ No newline at end of file diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/searchtools.js b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/searchtools.js new file mode 100644 index 0000000000000000000000000000000000000000..97d56a74d8207ec36a96a5e24f4a4b42dd51f6b2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/searchtools.js @@ -0,0 +1,566 @@ +/* + * searchtools.js + * ~~~~~~~~~~~~~~~~ + * + * Sphinx JavaScript utilities for the full-text search. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ +"use strict"; + +/** + * Simple result scoring code. + */ +if (typeof Scorer === "undefined") { + var Scorer = { + // Implement the following function to further tweak the score for each result + // The function takes a result array [docname, title, anchor, descr, score, filename] + // and returns the new score. + /* + score: result => { + const [docname, title, anchor, descr, score, filename] = result + return score + }, + */ + + // query matches the full name of an object + objNameMatch: 11, + // or matches in the last dotted part of the object name + objPartialMatch: 6, + // Additive scores depending on the priority of the object + objPrio: { + 0: 15, // used to be importantResults + 1: 5, // used to be objectResults + 2: -5, // used to be unimportantResults + }, + // Used when the priority is not in the mapping. 
+ objPrioDefault: 0, + + // query found in title + title: 15, + partialTitle: 7, + // query found in terms + term: 5, + partialTerm: 2, + }; +} + +const _removeChildren = (element) => { + while (element && element.lastChild) element.removeChild(element.lastChild); +}; + +/** + * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions#escaping + */ +const _escapeRegExp = (string) => + string.replace(/[.*+\-?^${}()|[\]\\]/g, "\\$&"); // $& means the whole matched string + +const _displayItem = (item, searchTerms) => { + const docBuilder = DOCUMENTATION_OPTIONS.BUILDER; + const docUrlRoot = DOCUMENTATION_OPTIONS.URL_ROOT; + const docFileSuffix = DOCUMENTATION_OPTIONS.FILE_SUFFIX; + const docLinkSuffix = DOCUMENTATION_OPTIONS.LINK_SUFFIX; + const showSearchSummary = DOCUMENTATION_OPTIONS.SHOW_SEARCH_SUMMARY; + + const [docName, title, anchor, descr, score, _filename] = item; + + let listItem = document.createElement("li"); + let requestUrl; + let linkUrl; + if (docBuilder === "dirhtml") { + // dirhtml builder + let dirname = docName + "/"; + if (dirname.match(/\/index\/$/)) + dirname = dirname.substring(0, dirname.length - 6); + else if (dirname === "index/") dirname = ""; + requestUrl = docUrlRoot + dirname; + linkUrl = requestUrl; + } else { + // normal html builders + requestUrl = docUrlRoot + docName + docFileSuffix; + linkUrl = docName + docLinkSuffix; + } + let linkEl = listItem.appendChild(document.createElement("a")); + linkEl.href = linkUrl + anchor; + linkEl.dataset.score = score; + linkEl.innerHTML = title; + if (descr) + listItem.appendChild(document.createElement("span")).innerHTML = + " (" + descr + ")"; + else if (showSearchSummary) + fetch(requestUrl) + .then((responseData) => responseData.text()) + .then((data) => { + if (data) + listItem.appendChild( + Search.makeSearchSummary(data, searchTerms) + ); + }); + Search.output.appendChild(listItem); +}; +const _finishSearch = (resultCount) => { + Search.stopPulse(); + Search.title.innerText = _("Search Results"); + if (!resultCount) + Search.status.innerText = Documentation.gettext( + "Your search did not match any documents. Please make sure that all words are spelled correctly and that you've selected enough categories." + ); + else + Search.status.innerText = _( + `Search finished, found ${resultCount} page(s) matching the search query.` + ); +}; +const _displayNextItem = ( + results, + resultCount, + searchTerms +) => { + // results left, load the summary and display it + // this is intended to be dynamic (don't sub resultsCount) + if (results.length) { + _displayItem(results.pop(), searchTerms); + setTimeout( + () => _displayNextItem(results, resultCount, searchTerms), + 5 + ); + } + // search finished, update title and status message + else _finishSearch(resultCount); +}; + +/** + * Default splitQuery function. Can be overridden in ``sphinx.search`` with a + * custom function per language. + * + * The regular expression works by splitting the string on consecutive characters + * that are not Unicode letters, numbers, underscores, or emoji characters. + * This is the same as ``\W+`` in Python, preserving the surrogate pair area. 
+ */ +if (typeof splitQuery === "undefined") { + var splitQuery = (query) => query + .split(/[^\p{Letter}\p{Number}_\p{Emoji_Presentation}]+/gu) + .filter(term => term) // remove remaining empty strings +} + +/** + * Search Module + */ +const Search = { + _index: null, + _queued_query: null, + _pulse_status: -1, + + htmlToText: (htmlString) => { + const htmlElement = new DOMParser().parseFromString(htmlString, 'text/html'); + htmlElement.querySelectorAll(".headerlink").forEach((el) => { el.remove() }); + const docContent = htmlElement.querySelector('[role="main"]'); + if (docContent !== undefined) return docContent.textContent; + console.warn( + "Content block not found. Sphinx search tries to obtain it via '[role=main]'. Could you check your theme or template." + ); + return ""; + }, + + init: () => { + const query = new URLSearchParams(window.location.search).get("q"); + document + .querySelectorAll('input[name="q"]') + .forEach((el) => (el.value = query)); + if (query) Search.performSearch(query); + }, + + loadIndex: (url) => + (document.body.appendChild(document.createElement("script")).src = url), + + setIndex: (index) => { + Search._index = index; + if (Search._queued_query !== null) { + const query = Search._queued_query; + Search._queued_query = null; + Search.query(query); + } + }, + + hasIndex: () => Search._index !== null, + + deferQuery: (query) => (Search._queued_query = query), + + stopPulse: () => (Search._pulse_status = -1), + + startPulse: () => { + if (Search._pulse_status >= 0) return; + + const pulse = () => { + Search._pulse_status = (Search._pulse_status + 1) % 4; + Search.dots.innerText = ".".repeat(Search._pulse_status); + if (Search._pulse_status >= 0) window.setTimeout(pulse, 500); + }; + pulse(); + }, + + /** + * perform a search for something (or wait until index is loaded) + */ + performSearch: (query) => { + // create the required interface elements + const searchText = document.createElement("h2"); + searchText.textContent = _("Searching"); + const searchSummary = document.createElement("p"); + searchSummary.classList.add("search-summary"); + searchSummary.innerText = ""; + const searchList = document.createElement("ul"); + searchList.classList.add("search"); + + const out = document.getElementById("search-results"); + Search.title = out.appendChild(searchText); + Search.dots = Search.title.appendChild(document.createElement("span")); + Search.status = out.appendChild(searchSummary); + Search.output = out.appendChild(searchList); + + const searchProgress = document.getElementById("search-progress"); + // Some themes don't use the search progress node + if (searchProgress) { + searchProgress.innerText = _("Preparing search..."); + } + Search.startPulse(); + + // index already loaded, the browser was quick! 
+ if (Search.hasIndex()) Search.query(query); + else Search.deferQuery(query); + }, + + /** + * execute search (requires search index to be loaded) + */ + query: (query) => { + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const titles = Search._index.titles; + const allTitles = Search._index.alltitles; + const indexEntries = Search._index.indexentries; + + // stem the search terms and add them to the correct list + const stemmer = new Stemmer(); + const searchTerms = new Set(); + const excludedTerms = new Set(); + const highlightTerms = new Set(); + const objectTerms = new Set(splitQuery(query.toLowerCase().trim())); + splitQuery(query.trim()).forEach((queryTerm) => { + const queryTermLower = queryTerm.toLowerCase(); + + // maybe skip this "word" + // stopwords array is from language_data.js + if ( + stopwords.indexOf(queryTermLower) !== -1 || + queryTerm.match(/^\d+$/) + ) + return; + + // stem the word + let word = stemmer.stemWord(queryTermLower); + // select the correct list + if (word[0] === "-") excludedTerms.add(word.substr(1)); + else { + searchTerms.add(word); + highlightTerms.add(queryTermLower); + } + }); + + if (SPHINX_HIGHLIGHT_ENABLED) { // set in sphinx_highlight.js + localStorage.setItem("sphinx_highlight_terms", [...highlightTerms].join(" ")) + } + + // console.debug("SEARCH: searching for:"); + // console.info("required: ", [...searchTerms]); + // console.info("excluded: ", [...excludedTerms]); + + // array of [docname, title, anchor, descr, score, filename] + let results = []; + _removeChildren(document.getElementById("search-progress")); + + const queryLower = query.toLowerCase(); + for (const [title, foundTitles] of Object.entries(allTitles)) { + if (title.toLowerCase().includes(queryLower) && (queryLower.length >= title.length/2)) { + for (const [file, id] of foundTitles) { + let score = Math.round(100 * queryLower.length / title.length) + results.push([ + docNames[file], + titles[file] !== title ? `${titles[file]} > ${title}` : title, + id !== null ? "#" + id : "", + null, + score, + filenames[file], + ]); + } + } + } + + // search for explicit entries in index directives + for (const [entry, foundEntries] of Object.entries(indexEntries)) { + if (entry.includes(queryLower) && (queryLower.length >= entry.length/2)) { + for (const [file, id] of foundEntries) { + let score = Math.round(100 * queryLower.length / entry.length) + results.push([ + docNames[file], + titles[file], + id ? "#" + id : "", + null, + score, + filenames[file], + ]); + } + } + } + + // lookup as object + objectTerms.forEach((term) => + results.push(...Search.performObjectSearch(term, objectTerms)) + ); + + // lookup as search terms in fulltext + results.push(...Search.performTermsSearch(searchTerms, excludedTerms)); + + // let the scorer override scores with a custom scoring function + if (Scorer.score) results.forEach((item) => (item[4] = Scorer.score(item))); + + // now sort the results by score (in opposite order of appearance, since the + // display function below uses pop() to retrieve items) and then + // alphabetically + results.sort((a, b) => { + const leftScore = a[4]; + const rightScore = b[4]; + if (leftScore === rightScore) { + // same score: sort alphabetically + const leftTitle = a[1].toLowerCase(); + const rightTitle = b[1].toLowerCase(); + if (leftTitle === rightTitle) return 0; + return leftTitle > rightTitle ? -1 : 1; // inverted is intentional + } + return leftScore > rightScore ? 
1 : -1; + }); + + // remove duplicate search results + // note the reversing of results, so that in the case of duplicates, the highest-scoring entry is kept + let seen = new Set(); + results = results.reverse().reduce((acc, result) => { + let resultStr = result.slice(0, 4).concat([result[5]]).map(v => String(v)).join(','); + if (!seen.has(resultStr)) { + acc.push(result); + seen.add(resultStr); + } + return acc; + }, []); + + results = results.reverse(); + + // for debugging + //Search.lastresults = results.slice(); // a copy + // console.info("search results:", Search.lastresults); + + // print the results + _displayNextItem(results, results.length, searchTerms); + }, + + /** + * search for object names + */ + performObjectSearch: (object, objectTerms) => { + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const objects = Search._index.objects; + const objNames = Search._index.objnames; + const titles = Search._index.titles; + + const results = []; + + const objectSearchCallback = (prefix, match) => { + const name = match[4] + const fullname = (prefix ? prefix + "." : "") + name; + const fullnameLower = fullname.toLowerCase(); + if (fullnameLower.indexOf(object) < 0) return; + + let score = 0; + const parts = fullnameLower.split("."); + + // check for different match types: exact matches of full name or + // "last name" (i.e. last dotted part) + if (fullnameLower === object || parts.slice(-1)[0] === object) + score += Scorer.objNameMatch; + else if (parts.slice(-1)[0].indexOf(object) > -1) + score += Scorer.objPartialMatch; // matches in last name + + const objName = objNames[match[1]][2]; + const title = titles[match[0]]; + + // If more than one term searched for, we require other words to be + // found in the name/title/description + const otherTerms = new Set(objectTerms); + otherTerms.delete(object); + if (otherTerms.size > 0) { + const haystack = `${prefix} ${name} ${objName} ${title}`.toLowerCase(); + if ( + [...otherTerms].some((otherTerm) => haystack.indexOf(otherTerm) < 0) + ) + return; + } + + let anchor = match[3]; + if (anchor === "") anchor = fullname; + else if (anchor === "-") anchor = objNames[match[1]][1] + "-" + fullname; + + const descr = objName + _(", in ") + title; + + // add custom score for some objects according to scorer + if (Scorer.objPrio.hasOwnProperty(match[2])) + score += Scorer.objPrio[match[2]]; + else score += Scorer.objPrioDefault; + + results.push([ + docNames[match[0]], + fullname, + "#" + anchor, + descr, + score, + filenames[match[0]], + ]); + }; + Object.keys(objects).forEach((prefix) => + objects[prefix].forEach((array) => + objectSearchCallback(prefix, array) + ) + ); + return results; + }, + + /** + * search for full-text terms in the index + */ + performTermsSearch: (searchTerms, excludedTerms) => { + // prepare search + const terms = Search._index.terms; + const titleTerms = Search._index.titleterms; + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const titles = Search._index.titles; + + const scoreMap = new Map(); + const fileMap = new Map(); + + // perform the search on the required terms + searchTerms.forEach((word) => { + const files = []; + const arr = [ + { files: terms[word], score: Scorer.term }, + { files: titleTerms[word], score: Scorer.title }, + ]; + // add support for partial matches + if (word.length > 2) { + const escapedWord = _escapeRegExp(word); + Object.keys(terms).forEach((term) => { + if (term.match(escapedWord) && !terms[word]) + arr.push({ 
files: terms[term], score: Scorer.partialTerm }); + }); + Object.keys(titleTerms).forEach((term) => { + if (term.match(escapedWord) && !titleTerms[word]) + arr.push({ files: titleTerms[word], score: Scorer.partialTitle }); + }); + } + + // no match but word was a required one + if (arr.every((record) => record.files === undefined)) return; + + // found search word in contents + arr.forEach((record) => { + if (record.files === undefined) return; + + let recordFiles = record.files; + if (recordFiles.length === undefined) recordFiles = [recordFiles]; + files.push(...recordFiles); + + // set score for the word in each file + recordFiles.forEach((file) => { + if (!scoreMap.has(file)) scoreMap.set(file, {}); + scoreMap.get(file)[word] = record.score; + }); + }); + + // create the mapping + files.forEach((file) => { + if (fileMap.has(file) && fileMap.get(file).indexOf(word) === -1) + fileMap.get(file).push(word); + else fileMap.set(file, [word]); + }); + }); + + // now check if the files don't contain excluded terms + const results = []; + for (const [file, wordList] of fileMap) { + // check if all requirements are matched + + // as search terms with length < 3 are discarded + const filteredTermCount = [...searchTerms].filter( + (term) => term.length > 2 + ).length; + if ( + wordList.length !== searchTerms.size && + wordList.length !== filteredTermCount + ) + continue; + + // ensure that none of the excluded terms is in the search result + if ( + [...excludedTerms].some( + (term) => + terms[term] === file || + titleTerms[term] === file || + (terms[term] || []).includes(file) || + (titleTerms[term] || []).includes(file) + ) + ) + break; + + // select one (max) score for the file. + const score = Math.max(...wordList.map((w) => scoreMap.get(file)[w])); + // add result to the result list + results.push([ + docNames[file], + titles[file], + "", + null, + score, + filenames[file], + ]); + } + return results; + }, + + /** + * helper function to return a node containing the + * search summary for a given text. keywords is a list + * of stemmed words. + */ + makeSearchSummary: (htmlText, keywords) => { + const text = Search.htmlToText(htmlText); + if (text === "") return null; + + const textLower = text.toLowerCase(); + const actualStartPosition = [...keywords] + .map((k) => textLower.indexOf(k.toLowerCase())) + .filter((i) => i > -1) + .slice(-1)[0]; + const startWithContext = Math.max(actualStartPosition - 120, 0); + + const top = startWithContext === 0 ? "" : "..."; + const tail = startWithContext + 240 < text.length ? "..." : ""; + + let summary = document.createElement("p"); + summary.classList.add("context"); + summary.textContent = top + text.substr(startWithContext, 240).trim() + tail; + + return summary; + }, +}; + +_ready(Search.init); diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/skeleton.css b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/skeleton.css new file mode 100644 index 0000000000000000000000000000000000000000..467c878c62025811f065562a1bcc5bf152d2d0c0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/skeleton.css @@ -0,0 +1,296 @@ +/* Some sane resets. */ +html { + height: 100%; +} + +body { + margin: 0; + min-height: 100%; +} + +/* All the flexbox magic! 
*/ +body, +.sb-announcement, +.sb-content, +.sb-main, +.sb-container, +.sb-container__inner, +.sb-article-container, +.sb-footer-content, +.sb-header, +.sb-header-secondary, +.sb-footer { + display: flex; +} + +/* These order things vertically */ +body, +.sb-main, +.sb-article-container { + flex-direction: column; +} + +/* Put elements in the center */ +.sb-header, +.sb-header-secondary, +.sb-container, +.sb-content, +.sb-footer, +.sb-footer-content { + justify-content: center; +} +/* Put elements at the ends */ +.sb-article-container { + justify-content: space-between; +} + +/* These elements grow. */ +.sb-main, +.sb-content, +.sb-container, +article { + flex-grow: 1; +} + +/* Because padding making this wider is not fun */ +article { + box-sizing: border-box; +} + +/* The announcements element should never be wider than the page. */ +.sb-announcement { + max-width: 100%; +} + +.sb-sidebar-primary, +.sb-sidebar-secondary { + flex-shrink: 0; + width: 17rem; +} + +.sb-announcement__inner { + justify-content: center; + + box-sizing: border-box; + height: 3rem; + + overflow-x: auto; + white-space: nowrap; +} + +/* Sidebars, with checkbox-based toggle */ +.sb-sidebar-primary, +.sb-sidebar-secondary { + position: fixed; + height: 100%; + top: 0; +} + +.sb-sidebar-primary { + left: -17rem; + transition: left 250ms ease-in-out; +} +.sb-sidebar-secondary { + right: -17rem; + transition: right 250ms ease-in-out; +} + +.sb-sidebar-toggle { + display: none; +} +.sb-sidebar-overlay { + position: fixed; + top: 0; + width: 0; + height: 0; + + transition: width 0ms ease 250ms, height 0ms ease 250ms, opacity 250ms ease; + + opacity: 0; + background-color: rgba(0, 0, 0, 0.54); +} + +#sb-sidebar-toggle--primary:checked + ~ .sb-sidebar-overlay[for="sb-sidebar-toggle--primary"], +#sb-sidebar-toggle--secondary:checked + ~ .sb-sidebar-overlay[for="sb-sidebar-toggle--secondary"] { + width: 100%; + height: 100%; + opacity: 1; + transition: width 0ms ease, height 0ms ease, opacity 250ms ease; +} + +#sb-sidebar-toggle--primary:checked ~ .sb-container .sb-sidebar-primary { + left: 0; +} +#sb-sidebar-toggle--secondary:checked ~ .sb-container .sb-sidebar-secondary { + right: 0; +} + +/* Full-width mode */ +.drop-secondary-sidebar-for-full-width-content + .hide-when-secondary-sidebar-shown { + display: none !important; +} +.drop-secondary-sidebar-for-full-width-content .sb-sidebar-secondary { + display: none !important; +} + +/* Mobile views */ +.sb-page-width { + width: 100%; +} + +.sb-article-container, +.sb-footer-content__inner, +.drop-secondary-sidebar-for-full-width-content .sb-article, +.drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 100vw; +} + +.sb-article, +.match-content-width { + padding: 0 1rem; + box-sizing: border-box; +} + +@media (min-width: 32rem) { + .sb-article, + .match-content-width { + padding: 0 2rem; + } +} + +/* Tablet views */ +@media (min-width: 42rem) { + .sb-article-container { + width: auto; + } + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 42rem; + } + .sb-article, + .match-content-width { + width: 42rem; + } +} +@media (min-width: 46rem) { + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 46rem; + } + .sb-article, + .match-content-width { + width: 46rem; + } +} +@media (min-width: 50rem) { + 
.sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 50rem; + } + .sb-article, + .match-content-width { + width: 50rem; + } +} + +/* Tablet views */ +@media (min-width: 59rem) { + .sb-sidebar-secondary { + position: static; + } + .hide-when-secondary-sidebar-shown { + display: none !important; + } + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 59rem; + } + .sb-article, + .match-content-width { + width: 42rem; + } +} +@media (min-width: 63rem) { + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 63rem; + } + .sb-article, + .match-content-width { + width: 46rem; + } +} +@media (min-width: 67rem) { + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 67rem; + } + .sb-article, + .match-content-width { + width: 50rem; + } +} + +/* Desktop views */ +@media (min-width: 76rem) { + .sb-sidebar-primary { + position: static; + } + .hide-when-primary-sidebar-shown { + display: none !important; + } + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 59rem; + } + .sb-article, + .match-content-width { + width: 42rem; + } +} + +/* Full desktop views */ +@media (min-width: 80rem) { + .sb-article, + .match-content-width { + width: 46rem; + } + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 63rem; + } +} + +@media (min-width: 84rem) { + .sb-article, + .match-content-width { + width: 50rem; + } + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 67rem; + } +} + +@media (min-width: 88rem) { + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 67rem; + } + .sb-page-width { + width: 88rem; + } +} diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/sphinx_highlight.js b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/sphinx_highlight.js new file mode 100644 index 0000000000000000000000000000000000000000..aae669d7ea6b1dc6edcb5c651f5d095f7a2dc6e6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/sphinx_highlight.js @@ -0,0 +1,144 @@ +/* Highlighting utilities for Sphinx HTML documentation. */ +"use strict"; + +const SPHINX_HIGHLIGHT_ENABLED = true + +/** + * highlight a given string on a node by wrapping it in + * span elements with the given class name. 
+ */ +const _highlight = (node, addItems, text, className) => { + if (node.nodeType === Node.TEXT_NODE) { + const val = node.nodeValue; + const parent = node.parentNode; + const pos = val.toLowerCase().indexOf(text); + if ( + pos >= 0 && + !parent.classList.contains(className) && + !parent.classList.contains("nohighlight") + ) { + let span; + + const closestNode = parent.closest("body, svg, foreignObject"); + const isInSVG = closestNode && closestNode.matches("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.classList.add(className); + } + + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + parent.insertBefore( + span, + parent.insertBefore( + document.createTextNode(val.substr(pos + text.length)), + node.nextSibling + ) + ); + node.nodeValue = val.substr(0, pos); + + if (isInSVG) { + const rect = document.createElementNS( + "http://www.w3.org/2000/svg", + "rect" + ); + const bbox = parent.getBBox(); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute("class", className); + addItems.push({ parent: parent, target: rect }); + } + } + } else if (node.matches && !node.matches("button, select, textarea")) { + node.childNodes.forEach((el) => _highlight(el, addItems, text, className)); + } +}; +const _highlightText = (thisNode, text, className) => { + let addItems = []; + _highlight(thisNode, addItems, text, className); + addItems.forEach((obj) => + obj.parent.insertAdjacentElement("beforebegin", obj.target) + ); +}; + +/** + * Small JavaScript module for the documentation. + */ +const SphinxHighlight = { + + /** + * highlight the search words provided in localstorage in the text + */ + highlightSearchWords: () => { + if (!SPHINX_HIGHLIGHT_ENABLED) return; // bail if no highlight + + // get and clear terms from localstorage + const url = new URL(window.location); + const highlight = + localStorage.getItem("sphinx_highlight_terms") + || url.searchParams.get("highlight") + || ""; + localStorage.removeItem("sphinx_highlight_terms") + url.searchParams.delete("highlight"); + window.history.replaceState({}, "", url); + + // get individual terms from highlight string + const terms = highlight.toLowerCase().split(/\s+/).filter(x => x); + if (terms.length === 0) return; // nothing to do + + // There should never be more than one element matching "div.body" + const divBody = document.querySelectorAll("div.body"); + const body = divBody.length ? 
divBody[0] : document.querySelector("body"); + window.setTimeout(() => { + terms.forEach((term) => _highlightText(body, term, "highlighted")); + }, 10); + + const searchBox = document.getElementById("searchbox"); + if (searchBox === null) return; + searchBox.appendChild( + document + .createRange() + .createContextualFragment( + '" + ) + ); + }, + + /** + * helper function to hide the search marks again + */ + hideSearchWords: () => { + document + .querySelectorAll("#searchbox .highlight-link") + .forEach((el) => el.remove()); + document + .querySelectorAll("span.highlighted") + .forEach((el) => el.classList.remove("highlighted")); + localStorage.removeItem("sphinx_highlight_terms") + }, + + initEscapeListener: () => { + // only install a listener if it is really needed + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) return; + + document.addEventListener("keydown", (event) => { + // bail for input elements + if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; + // bail with special keys + if (event.shiftKey || event.altKey || event.ctrlKey || event.metaKey) return; + if (DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS && (event.key === "Escape")) { + SphinxHighlight.hideSearchWords(); + event.preventDefault(); + } + }); + }, +}; + +_ready(SphinxHighlight.highlightSearchWords); +_ready(SphinxHighlight.initEscapeListener); diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/styles/furo-extensions.css b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/styles/furo-extensions.css new file mode 100644 index 0000000000000000000000000000000000000000..bc447f228f5247cc450a0e8b1f41173d3a264f5a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/styles/furo-extensions.css @@ -0,0 +1,2 @@ +#furo-sidebar-ad-placement{padding:var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal)}#furo-sidebar-ad-placement .ethical-sidebar{background:var(--color-background-secondary);border:none;box-shadow:none}#furo-sidebar-ad-placement .ethical-sidebar:hover{background:var(--color-background-hover)}#furo-sidebar-ad-placement .ethical-sidebar a{color:var(--color-foreground-primary)}#furo-sidebar-ad-placement .ethical-callout a{color:var(--color-foreground-secondary)!important}#furo-readthedocs-versions{background:transparent;display:block;position:static;width:100%}#furo-readthedocs-versions .rst-versions{background:#1a1c1e}#furo-readthedocs-versions .rst-current-version{background:var(--color-sidebar-item-background);cursor:unset}#furo-readthedocs-versions .rst-current-version:hover{background:var(--color-sidebar-item-background)}#furo-readthedocs-versions .rst-current-version .fa-book{color:var(--color-foreground-primary)}#furo-readthedocs-versions>.rst-other-versions{padding:0}#furo-readthedocs-versions>.rst-other-versions small{opacity:1}#furo-readthedocs-versions .injected .rst-versions{position:unset}#furo-readthedocs-versions:focus-within,#furo-readthedocs-versions:hover{box-shadow:0 0 0 1px var(--color-sidebar-background-border)}#furo-readthedocs-versions:focus-within .rst-current-version,#furo-readthedocs-versions:hover .rst-current-version{background:#1a1c1e;font-size:inherit;height:auto;line-height:inherit;padding:12px;text-align:right}#furo-readthedocs-versions:focus-within .rst-current-version .fa-book,#furo-readthedocs-versions:hover .rst-current-version .fa-book{color:#fff;float:left}#furo-readthedocs-versions:focus-within 
.fa-caret-down,#furo-readthedocs-versions:hover .fa-caret-down{display:none}#furo-readthedocs-versions:focus-within .injected,#furo-readthedocs-versions:focus-within .rst-current-version,#furo-readthedocs-versions:focus-within .rst-other-versions,#furo-readthedocs-versions:hover .injected,#furo-readthedocs-versions:hover .rst-current-version,#furo-readthedocs-versions:hover .rst-other-versions{display:block}#furo-readthedocs-versions:focus-within>.rst-current-version,#furo-readthedocs-versions:hover>.rst-current-version{display:none}.highlight:hover button.copybtn{color:var(--color-code-foreground)}.highlight button.copybtn{align-items:center;background-color:var(--color-code-background);border:none;color:var(--color-background-item);cursor:pointer;height:1.25em;opacity:1;right:.5rem;top:.625rem;transition:color .3s,opacity .3s;width:1.25em}.highlight button.copybtn:hover{background-color:var(--color-code-background);color:var(--color-brand-content)}.highlight button.copybtn:after{background-color:transparent;color:var(--color-code-foreground);display:none}.highlight button.copybtn.success{color:#22863a;transition:color 0ms}.highlight button.copybtn.success:after{display:block}.highlight button.copybtn svg{padding:0}body{--sd-color-primary:var(--color-brand-primary);--sd-color-primary-highlight:var(--color-brand-content);--sd-color-primary-text:var(--color-background-primary);--sd-color-shadow:rgba(0,0,0,.05);--sd-color-card-border:var(--color-card-border);--sd-color-card-border-hover:var(--color-brand-content);--sd-color-card-background:var(--color-card-background);--sd-color-card-text:var(--color-foreground-primary);--sd-color-card-header:var(--color-card-marginals-background);--sd-color-card-footer:var(--color-card-marginals-background);--sd-color-tabs-label-active:var(--color-brand-content);--sd-color-tabs-label-hover:var(--color-foreground-muted);--sd-color-tabs-label-inactive:var(--color-foreground-muted);--sd-color-tabs-underline-active:var(--color-brand-content);--sd-color-tabs-underline-hover:var(--color-foreground-border);--sd-color-tabs-underline-inactive:var(--color-background-border);--sd-color-tabs-overline:var(--color-background-border);--sd-color-tabs-underline:var(--color-background-border)}.sd-tab-content{box-shadow:0 -2px var(--sd-color-tabs-overline),0 1px var(--sd-color-tabs-underline)}.sd-card{box-shadow:0 .1rem .25rem var(--sd-color-shadow),0 0 .0625rem rgba(0,0,0,.1)}.sd-shadow-sm{box-shadow:0 .1rem .25rem var(--sd-color-shadow),0 0 .0625rem rgba(0,0,0,.1)!important}.sd-shadow-md{box-shadow:0 .3rem .75rem var(--sd-color-shadow),0 0 .0625rem rgba(0,0,0,.1)!important}.sd-shadow-lg{box-shadow:0 .6rem 1.5rem var(--sd-color-shadow),0 0 .0625rem rgba(0,0,0,.1)!important}.sd-card-hover:hover{transform:none}.sd-cards-carousel{gap:.25rem;padding:.25rem}body{--tabs--label-text:var(--color-foreground-muted);--tabs--label-text--hover:var(--color-foreground-muted);--tabs--label-text--active:var(--color-brand-content);--tabs--label-text--active--hover:var(--color-brand-content);--tabs--label-background:transparent;--tabs--label-background--hover:transparent;--tabs--label-background--active:transparent;--tabs--label-background--active--hover:transparent;--tabs--padding-x:0.25em;--tabs--margin-x:1em;--tabs--border:var(--color-background-border);--tabs--label-border:transparent;--tabs--label-border--hover:var(--color-foreground-muted);--tabs--label-border--active:var(--color-brand-content);--tabs--label-border--active--hover:var(--color-brand-content)}[role=main] 
.container{max-width:none;padding-left:0;padding-right:0}.shadow.docutils{border:none;box-shadow:0 .2rem .5rem rgba(0,0,0,.05),0 0 .0625rem rgba(0,0,0,.1)!important}.sphinx-bs .card{background-color:var(--color-background-secondary);color:var(--color-foreground)} +/*# sourceMappingURL=furo-extensions.css.map*/ \ No newline at end of file diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/styles/furo-extensions.css.map b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/styles/furo-extensions.css.map new file mode 100644 index 0000000000000000000000000000000000000000..9ba5637f9a43ef8158b7ed724764df79a3144b08 --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/styles/furo-extensions.css.map @@ -0,0 +1 @@ +{"version":3,"file":"styles/furo-extensions.css","mappings":"AAGA,2BACE,oFACA,4CAKE,6CAHA,YACA,eAEA,CACA,kDACE,yCAEF,8CACE,sCAEJ,8CACE,kDAEJ,2BAGE,uBACA,cAHA,gBACA,UAEA,CAGA,yCACE,mBAEF,gDAEE,gDADA,YACA,CACA,sDACE,gDACF,yDACE,sCAEJ,+CACE,UACA,qDACE,UAGF,mDACE,eAEJ,yEAEE,4DAEA,mHASE,mBAPA,kBAEA,YADA,oBAGA,aADA,gBAIA,CAEA,qIAEE,WADA,UACA,CAEJ,uGACE,aAEF,iUAGE,cAEF,mHACE,aC1EJ,gCACE,mCAEF,0BAKE,mBAUA,8CACA,YAFA,mCAKA,eAZA,cALA,UASA,YADA,YAYA,iCAdA,YAcA,CAEA,gCAEE,8CADA,gCACA,CAEF,gCAGE,6BADA,mCADA,YAEA,CAEF,kCAEE,cADA,oBACA,CACA,wCACE,cAEJ,8BACE,UC5CN,KAEE,6CAA8C,CAC9C,uDAAwD,CACxD,uDAAwD,CAGxD,iCAAsC,CAGtC,+CAAgD,CAChD,uDAAwD,CACxD,uDAAwD,CACxD,oDAAqD,CACrD,6DAA8D,CAC9D,6DAA8D,CAG9D,uDAAwD,CACxD,yDAA0D,CAC1D,4DAA6D,CAC7D,2DAA4D,CAC5D,8DAA+D,CAC/D,iEAAkE,CAClE,uDAAwD,CACxD,wDAAyD,CAG3D,gBACE,qFAGF,SACE,6EAEF,cACE,uFAEF,cACE,uFAEF,cACE,uFAGF,qBACE,eAEF,mBACE,WACA,eChDF,KACE,gDAAiD,CACjD,uDAAwD,CACxD,qDAAsD,CACtD,4DAA6D,CAC7D,oCAAqC,CACrC,2CAA4C,CAC5C,4CAA6C,CAC7C,mDAAoD,CACpD,wBAAyB,CACzB,oBAAqB,CACrB,6CAA8C,CAC9C,gCAAiC,CACjC,yDAA0D,CAC1D,uDAAwD,CACxD,8DAA+D,CCbjE,uBACE,eACA,eACA,gBAGF,iBACE,YACA,+EAGF,iBACE,mDACA","sources":["webpack:///./src/furo/assets/styles/extensions/_readthedocs.sass","webpack:///./src/furo/assets/styles/extensions/_copybutton.sass","webpack:///./src/furo/assets/styles/extensions/_sphinx-design.sass","webpack:///./src/furo/assets/styles/extensions/_sphinx-inline-tabs.sass","webpack:///./src/furo/assets/styles/extensions/_sphinx-panels.sass"],"sourcesContent":["// This file contains the styles used for tweaking how ReadTheDoc's embedded\n// contents would show up inside the theme.\n\n#furo-sidebar-ad-placement\n padding: var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal)\n .ethical-sidebar\n // Remove the border and box-shadow.\n border: none\n box-shadow: none\n // Manage the background colors.\n background: var(--color-background-secondary)\n &:hover\n background: var(--color-background-hover)\n // Ensure the text is legible.\n a\n color: var(--color-foreground-primary)\n\n .ethical-callout a\n color: var(--color-foreground-secondary) !important\n\n#furo-readthedocs-versions\n position: static\n width: 100%\n background: transparent\n display: block\n\n // Make the background color fit with the theme's aesthetic.\n .rst-versions\n background: rgb(26, 28, 30)\n\n .rst-current-version\n cursor: unset\n background: var(--color-sidebar-item-background)\n &:hover\n background: var(--color-sidebar-item-background)\n .fa-book\n color: var(--color-foreground-primary)\n\n > .rst-other-versions\n padding: 0\n small\n opacity: 1\n\n .injected\n .rst-versions\n position: unset\n\n &:hover,\n &:focus-within\n box-shadow: 0 0 0 1px 
var(--color-sidebar-background-border)\n\n .rst-current-version\n // Undo the tweaks done in RTD's CSS\n font-size: inherit\n line-height: inherit\n height: auto\n text-align: right\n padding: 12px\n\n // Match the rest of the body\n background: #1a1c1e\n\n .fa-book\n float: left\n color: white\n\n .fa-caret-down\n display: none\n\n .rst-current-version,\n .rst-other-versions,\n .injected\n display: block\n\n > .rst-current-version\n display: none\n",".highlight\n &:hover button.copybtn\n color: var(--color-code-foreground)\n\n button.copybtn\n // Make it visible\n opacity: 1\n\n // Align things correctly\n align-items: center\n\n height: 1.25em\n width: 1.25em\n\n top: 0.625rem // $code-spacing-vertical\n right: 0.5rem\n\n // Make it look better\n color: var(--color-background-item)\n background-color: var(--color-code-background)\n border: none\n\n // Change to cursor to make it obvious that you can click on it\n cursor: pointer\n\n // Transition smoothly, for aesthetics\n transition: color 300ms, opacity 300ms\n\n &:hover\n color: var(--color-brand-content)\n background-color: var(--color-code-background)\n\n &::after\n display: none\n color: var(--color-code-foreground)\n background-color: transparent\n\n &.success\n transition: color 0ms\n color: #22863a\n &::after\n display: block\n\n svg\n padding: 0\n","body\n // Colors\n --sd-color-primary: var(--color-brand-primary)\n --sd-color-primary-highlight: var(--color-brand-content)\n --sd-color-primary-text: var(--color-background-primary)\n\n // Shadows\n --sd-color-shadow: rgba(0, 0, 0, 0.05)\n\n // Cards\n --sd-color-card-border: var(--color-card-border)\n --sd-color-card-border-hover: var(--color-brand-content)\n --sd-color-card-background: var(--color-card-background)\n --sd-color-card-text: var(--color-foreground-primary)\n --sd-color-card-header: var(--color-card-marginals-background)\n --sd-color-card-footer: var(--color-card-marginals-background)\n\n // Tabs\n --sd-color-tabs-label-active: var(--color-brand-content)\n --sd-color-tabs-label-hover: var(--color-foreground-muted)\n --sd-color-tabs-label-inactive: var(--color-foreground-muted)\n --sd-color-tabs-underline-active: var(--color-brand-content)\n --sd-color-tabs-underline-hover: var(--color-foreground-border)\n --sd-color-tabs-underline-inactive: var(--color-background-border)\n --sd-color-tabs-overline: var(--color-background-border)\n --sd-color-tabs-underline: var(--color-background-border)\n\n// Tabs\n.sd-tab-content\n box-shadow: 0 -2px var(--sd-color-tabs-overline), 0 1px var(--sd-color-tabs-underline)\n\n// Shadows\n.sd-card // Have a shadow by default\n box-shadow: 0 0.1rem 0.25rem var(--sd-color-shadow), 0 0 0.0625rem rgba(0, 0, 0, 0.1)\n\n.sd-shadow-sm\n box-shadow: 0 0.1rem 0.25rem var(--sd-color-shadow), 0 0 0.0625rem rgba(0, 0, 0, 0.1) !important\n\n.sd-shadow-md\n box-shadow: 0 0.3rem 0.75rem var(--sd-color-shadow), 0 0 0.0625rem rgba(0, 0, 0, 0.1) !important\n\n.sd-shadow-lg\n box-shadow: 0 0.6rem 1.5rem var(--sd-color-shadow), 0 0 0.0625rem rgba(0, 0, 0, 0.1) !important\n\n// Cards\n.sd-card-hover:hover // Don't change scale on hover\n transform: none\n\n.sd-cards-carousel // Have a bit of gap in the carousel by default\n gap: 0.25rem\n padding: 0.25rem\n","// This file contains styles to tweak sphinx-inline-tabs to work well with Furo.\n\nbody\n --tabs--label-text: var(--color-foreground-muted)\n --tabs--label-text--hover: var(--color-foreground-muted)\n --tabs--label-text--active: var(--color-brand-content)\n --tabs--label-text--active--hover: 
var(--color-brand-content)\n --tabs--label-background: transparent\n --tabs--label-background--hover: transparent\n --tabs--label-background--active: transparent\n --tabs--label-background--active--hover: transparent\n --tabs--padding-x: 0.25em\n --tabs--margin-x: 1em\n --tabs--border: var(--color-background-border)\n --tabs--label-border: transparent\n --tabs--label-border--hover: var(--color-foreground-muted)\n --tabs--label-border--active: var(--color-brand-content)\n --tabs--label-border--active--hover: var(--color-brand-content)\n","// This file contains styles to tweak sphinx-panels to work well with Furo.\n\n// sphinx-panels includes Bootstrap 4, which uses .container which can conflict\n// with docutils' `.. container::` directive.\n[role=\"main\"] .container\n max-width: initial\n padding-left: initial\n padding-right: initial\n\n// Make the panels look nicer!\n.shadow.docutils\n border: none\n box-shadow: 0 0.2rem 0.5rem rgba(0, 0, 0, 0.05), 0 0 0.0625rem rgba(0, 0, 0, 0.1) !important\n\n// Make panel colors respond to dark mode\n.sphinx-bs .card\n background-color: var(--color-background-secondary)\n color: var(--color-foreground)\n"],"names":[],"sourceRoot":""} \ No newline at end of file diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/styles/furo.css b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/styles/furo.css new file mode 100644 index 0000000000000000000000000000000000000000..b30989daf5fafa6b3139f90d51160b3f2403487a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/styles/furo.css @@ -0,0 +1,2 @@ +/*! normalize.css v8.0.1 | MIT License | github.com/necolas/normalize.css */html{-webkit-text-size-adjust:100%;line-height:1.15}body{margin:0}main{display:block}h1{font-size:2em;margin:.67em 0}hr{box-sizing:content-box;height:0;overflow:visible}pre{font-family:monospace,monospace;font-size:1em}a{background-color:transparent}abbr[title]{border-bottom:none;text-decoration:underline;text-decoration:underline dotted}b,strong{font-weight:bolder}code,kbd,samp{font-family:monospace,monospace;font-size:1em}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}img{border-style:none}button,input,optgroup,select,textarea{font-family:inherit;font-size:100%;line-height:1.15;margin:0}button,input{overflow:visible}button,select{text-transform:none}[type=button],[type=reset],[type=submit],button{-webkit-appearance:button}[type=button]::-moz-focus-inner,[type=reset]::-moz-focus-inner,[type=submit]::-moz-focus-inner,button::-moz-focus-inner{border-style:none;padding:0}[type=button]:-moz-focusring,[type=reset]:-moz-focusring,[type=submit]:-moz-focusring,button:-moz-focusring{outline:1px dotted ButtonText}fieldset{padding:.35em .75em .625em}legend{box-sizing:border-box;color:inherit;display:table;max-width:100%;padding:0;white-space:normal}progress{vertical-align:baseline}textarea{overflow:auto}[type=checkbox],[type=radio]{box-sizing:border-box;padding:0}[type=number]::-webkit-inner-spin-button,[type=number]::-webkit-outer-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}[type=search]::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}details{display:block}summary{display:list-item}[hidden],template{display:none}@media 
print{.content-icon-container,.headerlink,.mobile-header,.related-pages{display:none!important}.highlight{border:.1pt solid var(--color-foreground-border)}a,blockquote,dl,ol,pre,table,ul{page-break-inside:avoid}caption,figure,h1,h2,h3,h4,h5,h6,img{page-break-after:avoid;page-break-inside:avoid}dl,ol,ul{page-break-before:avoid}}.visually-hidden{clip:rect(0,0,0,0)!important;border:0!important;height:1px!important;margin:-1px!important;overflow:hidden!important;padding:0!important;position:absolute!important;white-space:nowrap!important;width:1px!important}:-moz-focusring{outline:auto}body{--font-stack:-apple-system,BlinkMacSystemFont,Segoe UI,Helvetica,Arial,sans-serif,Apple Color Emoji,Segoe UI Emoji;--font-stack--monospace:"SFMono-Regular",Menlo,Consolas,Monaco,Liberation Mono,Lucida Console,monospace;--font-size--normal:100%;--font-size--small:87.5%;--font-size--small--2:81.25%;--font-size--small--3:75%;--font-size--small--4:62.5%;--sidebar-caption-font-size:var(--font-size--small--2);--sidebar-item-font-size:var(--font-size--small);--sidebar-search-input-font-size:var(--font-size--small);--toc-font-size:var(--font-size--small--3);--toc-font-size--mobile:var(--font-size--normal);--toc-title-font-size:var(--font-size--small--4);--admonition-font-size:0.8125rem;--admonition-title-font-size:0.8125rem;--code-font-size:var(--font-size--small--2);--api-font-size:var(--font-size--small);--header-height:calc(var(--sidebar-item-line-height) + var(--sidebar-item-spacing-vertical)*4);--header-padding:0.5rem;--sidebar-tree-space-above:1.5rem;--sidebar-caption-space-above:1rem;--sidebar-item-line-height:1rem;--sidebar-item-spacing-vertical:0.5rem;--sidebar-item-spacing-horizontal:1rem;--sidebar-item-height:calc(var(--sidebar-item-line-height) + var(--sidebar-item-spacing-vertical)*2);--sidebar-expander-width:var(--sidebar-item-height);--sidebar-search-space-above:0.5rem;--sidebar-search-input-spacing-vertical:0.5rem;--sidebar-search-input-spacing-horizontal:0.5rem;--sidebar-search-input-height:1rem;--sidebar-search-icon-size:var(--sidebar-search-input-height);--toc-title-padding:0.25rem 
0;--toc-spacing-vertical:1.5rem;--toc-spacing-horizontal:1.5rem;--toc-item-spacing-vertical:0.4rem;--toc-item-spacing-horizontal:1rem;--icon-search:url('data:image/svg+xml;charset=utf-8,');--icon-pencil:url('data:image/svg+xml;charset=utf-8,');--icon-abstract:url('data:image/svg+xml;charset=utf-8,');--icon-info:url('data:image/svg+xml;charset=utf-8,');--icon-flame:url('data:image/svg+xml;charset=utf-8,');--icon-question:url('data:image/svg+xml;charset=utf-8,');--icon-warning:url('data:image/svg+xml;charset=utf-8,');--icon-failure:url('data:image/svg+xml;charset=utf-8,');--icon-spark:url('data:image/svg+xml;charset=utf-8,');--color-admonition-title--caution:#ff9100;--color-admonition-title-background--caution:rgba(255,145,0,.2);--color-admonition-title--warning:#ff9100;--color-admonition-title-background--warning:rgba(255,145,0,.2);--color-admonition-title--danger:#ff5252;--color-admonition-title-background--danger:rgba(255,82,82,.2);--color-admonition-title--attention:#ff5252;--color-admonition-title-background--attention:rgba(255,82,82,.2);--color-admonition-title--error:#ff5252;--color-admonition-title-background--error:rgba(255,82,82,.2);--color-admonition-title--hint:#00c852;--color-admonition-title-background--hint:rgba(0,200,82,.2);--color-admonition-title--tip:#00c852;--color-admonition-title-background--tip:rgba(0,200,82,.2);--color-admonition-title--important:#00bfa5;--color-admonition-title-background--important:rgba(0,191,165,.2);--color-admonition-title--note:#00b0ff;--color-admonition-title-background--note:rgba(0,176,255,.2);--color-admonition-title--seealso:#448aff;--color-admonition-title-background--seealso:rgba(68,138,255,.2);--color-admonition-title--admonition-todo:grey;--color-admonition-title-background--admonition-todo:hsla(0,0%,50%,.2);--color-admonition-title:#651fff;--color-admonition-title-background:rgba(101,31,255,.2);--icon-admonition-default:var(--icon-abstract);--color-topic-title:#14b8a6;--color-topic-title-background:rgba(20,184,166,.2);--icon-topic-default:var(--icon-pencil);--color-problematic:#b30000;--color-foreground-primary:#000;--color-foreground-secondary:#5a5c63;--color-foreground-muted:#646776;--color-foreground-border:#878787;--color-background-primary:#fff;--color-background-secondary:#f8f9fb;--color-background-hover:#efeff4;--color-background-hover--transparent:#efeff400;--color-background-border:#eeebee;--color-background-item:#ccc;--color-announcement-background:#000000dd;--color-announcement-text:#eeebee;--color-brand-primary:#2962ff;--color-brand-content:#2a5adf;--color-api-background:var(--color-background-hover--transparent);--color-api-background-hover:var(--color-background-hover);--color-api-overall:var(--color-foreground-secondary);--color-api-name:var(--color-problematic);--color-api-pre-name:var(--color-problematic);--color-api-paren:var(--color-foreground-secondary);--color-api-keyword:var(--color-foreground-primary);--color-highlight-on-target:#ffc;--color-inline-code-background:var(--color-background-secondary);--color-highlighted-background:#def;--color-highlighted-text:var(--color-foreground-primary);--color-guilabel-background:#ddeeff80;--color-guilabel-border:#bedaf580;--color-guilabel-text:var(--color-foreground-primary);--color-admonition-background:transparent;--color-table-header-background:var(--color-background-secondary);--color-table-border:var(--color-background-border);--color-card-border:var(--color-background-secondary);--color-card-background:transparent;--color-card-marginals-background:var(--color-background-se
condary);--color-header-background:var(--color-background-primary);--color-header-border:var(--color-background-border);--color-header-text:var(--color-foreground-primary);--color-sidebar-background:var(--color-background-secondary);--color-sidebar-background-border:var(--color-background-border);--color-sidebar-brand-text:var(--color-foreground-primary);--color-sidebar-caption-text:var(--color-foreground-muted);--color-sidebar-link-text:var(--color-foreground-secondary);--color-sidebar-link-text--top-level:var(--color-brand-primary);--color-sidebar-item-background:var(--color-sidebar-background);--color-sidebar-item-background--current:var( --color-sidebar-item-background );--color-sidebar-item-background--hover:linear-gradient(90deg,var(--color-background-hover--transparent) 0%,var(--color-background-hover) var(--sidebar-item-spacing-horizontal),var(--color-background-hover) 100%);--color-sidebar-item-expander-background:transparent;--color-sidebar-item-expander-background--hover:var( --color-background-hover );--color-sidebar-search-text:var(--color-foreground-primary);--color-sidebar-search-background:var(--color-background-secondary);--color-sidebar-search-background--focus:var(--color-background-primary);--color-sidebar-search-border:var(--color-background-border);--color-sidebar-search-icon:var(--color-foreground-muted);--color-toc-background:var(--color-background-primary);--color-toc-title-text:var(--color-foreground-muted);--color-toc-item-text:var(--color-foreground-secondary);--color-toc-item-text--hover:var(--color-foreground-primary);--color-toc-item-text--active:var(--color-brand-primary);--color-content-foreground:var(--color-foreground-primary);--color-content-background:transparent;--color-link:var(--color-brand-content);--color-link--hover:var(--color-brand-content);--color-link-underline:var(--color-background-border);--color-link-underline--hover:var(--color-foreground-border)}.only-light{display:block!important}html body .only-dark{display:none!important}@media not print{body[data-theme=dark]{--color-problematic:#ee5151;--color-foreground-primary:#ffffffcc;--color-foreground-secondary:#9ca0a5;--color-foreground-muted:#81868d;--color-foreground-border:#666;--color-background-primary:#131416;--color-background-secondary:#1a1c1e;--color-background-hover:#1e2124;--color-background-hover--transparent:#1e212400;--color-background-border:#303335;--color-background-item:#444;--color-announcement-background:#000000dd;--color-announcement-text:#eeebee;--color-brand-primary:#2b8cee;--color-brand-content:#368ce2;--color-highlighted-background:#083563;--color-guilabel-background:#08356380;--color-guilabel-border:#13395f80;--color-api-keyword:var(--color-foreground-secondary);--color-highlight-on-target:#330;--color-admonition-background:#18181a;--color-card-border:var(--color-background-secondary);--color-card-background:#18181a;--color-card-marginals-background:var(--color-background-hover)}html body[data-theme=dark] .only-light{display:none!important}body[data-theme=dark] 
.only-dark{display:block!important}@media(prefers-color-scheme:dark){body:not([data-theme=light]){--color-problematic:#ee5151;--color-foreground-primary:#ffffffcc;--color-foreground-secondary:#9ca0a5;--color-foreground-muted:#81868d;--color-foreground-border:#666;--color-background-primary:#131416;--color-background-secondary:#1a1c1e;--color-background-hover:#1e2124;--color-background-hover--transparent:#1e212400;--color-background-border:#303335;--color-background-item:#444;--color-announcement-background:#000000dd;--color-announcement-text:#eeebee;--color-brand-primary:#2b8cee;--color-brand-content:#368ce2;--color-highlighted-background:#083563;--color-guilabel-background:#08356380;--color-guilabel-border:#13395f80;--color-api-keyword:var(--color-foreground-secondary);--color-highlight-on-target:#330;--color-admonition-background:#18181a;--color-card-border:var(--color-background-secondary);--color-card-background:#18181a;--color-card-marginals-background:var(--color-background-hover)}html body:not([data-theme=light]) .only-light{display:none!important}body:not([data-theme=light]) .only-dark{display:block!important}}}body[data-theme=auto] .theme-toggle svg.theme-icon-when-auto,body[data-theme=dark] .theme-toggle svg.theme-icon-when-dark,body[data-theme=light] .theme-toggle svg.theme-icon-when-light{display:block}body{font-family:var(--font-stack)}code,kbd,pre,samp{font-family:var(--font-stack--monospace)}body{-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}article{line-height:1.5}h1,h2,h3,h4,h5,h6{border-radius:.5rem;font-weight:700;line-height:1.25;margin:.5rem -.5rem;padding-left:.5rem;padding-right:.5rem}h1+p,h2+p,h3+p,h4+p,h5+p,h6+p{margin-top:0}h1{font-size:2.5em;margin-bottom:1rem}h1,h2{margin-top:1.75rem}h2{font-size:2em}h3{font-size:1.5em}h4{font-size:1.25em}h5{font-size:1.125em}h6{font-size:1em}small{font-size:80%;opacity:75%}p{margin-bottom:.75rem;margin-top:.5rem}hr.docutils{background-color:var(--color-background-border);border:0;height:1px;margin:2rem 0;padding:0}.centered{text-align:center}a{color:var(--color-link);text-decoration:underline;-webkit-text-decoration-color:var(--color-link-underline);text-decoration-color:var(--color-link-underline)}a:hover{color:var(--color-link--hover);-webkit-text-decoration-color:var(--color-link-underline--hover);text-decoration-color:var(--color-link-underline--hover)}a.muted-link{color:inherit}a.muted-link:hover{color:var(--color-link);-webkit-text-decoration-color:var(--color-link-underline--hover);text-decoration-color:var(--color-link-underline--hover)}html{overflow-x:hidden;overflow-y:scroll;scroll-behavior:smooth}.sidebar-scroll,.toc-scroll,article[role=main] *{scrollbar-color:var(--color-foreground-border) transparent;scrollbar-width:thin}.sidebar-scroll::-webkit-scrollbar,.toc-scroll::-webkit-scrollbar,article[role=main] ::-webkit-scrollbar{height:.25rem;width:.25rem}.sidebar-scroll::-webkit-scrollbar-thumb,.toc-scroll::-webkit-scrollbar-thumb,article[role=main] ::-webkit-scrollbar-thumb{background-color:var(--color-foreground-border);border-radius:.125rem}body,html{background:var(--color-background-primary);color:var(--color-foreground-primary);height:100%}article{background:var(--color-content-background);color:var(--color-content-foreground)}.page{display:flex;min-height:100%}.mobile-header{background-color:var(--color-header-background);border-bottom:1px solid 
var(--color-header-border);color:var(--color-header-text);display:none;height:var(--header-height);width:100%;z-index:10}.mobile-header.scrolled{border-bottom:none;box-shadow:0 0 .2rem rgba(0,0,0,.1),0 .2rem .4rem rgba(0,0,0,.2)}.mobile-header .header-center a{color:var(--color-header-text);text-decoration:none}.main{display:flex;flex:1}.sidebar-drawer{background:var(--color-sidebar-background);border-right:1px solid var(--color-sidebar-background-border);box-sizing:border-box;display:flex;justify-content:flex-end;min-width:15em;width:calc(50% - 26em)}.sidebar-container,.toc-drawer{box-sizing:border-box;width:15em}.toc-drawer{background:var(--color-toc-background);padding-right:1rem}.sidebar-sticky,.toc-sticky{display:flex;flex-direction:column;height:min(100%,100vh);height:100vh;position:-webkit-sticky;position:sticky;top:0}.sidebar-scroll,.toc-scroll{flex-grow:1;flex-shrink:1;overflow:auto;scroll-behavior:smooth}.content{display:flex;flex-direction:column;justify-content:space-between;padding:0 3em;width:46em}.icon{display:inline-block;height:1rem;width:1rem}.icon svg{height:100%;width:100%}.announcement{align-items:center;background-color:var(--color-announcement-background);color:var(--color-announcement-text);display:flex;height:var(--header-height);overflow-x:auto}.announcement+.page{min-height:calc(100% - var(--header-height))}.announcement-content{box-sizing:border-box;min-width:100%;padding:.5rem;text-align:center;white-space:nowrap}.announcement-content a{color:var(--color-announcement-text);-webkit-text-decoration-color:var(--color-announcement-text);text-decoration-color:var(--color-announcement-text)}.announcement-content a:hover{color:var(--color-announcement-text);-webkit-text-decoration-color:var(--color-link--hover);text-decoration-color:var(--color-link--hover)}.no-js .theme-toggle-container{display:none}.theme-toggle-container{vertical-align:middle}.theme-toggle{background:transparent;border:none;cursor:pointer;padding:0}.theme-toggle svg{color:var(--color-foreground-primary);display:none;height:1rem;vertical-align:middle;width:1rem}.theme-toggle-header{float:left;padding:1rem .5rem}.nav-overlay-icon,.toc-overlay-icon{cursor:pointer;display:none}.nav-overlay-icon .icon,.toc-overlay-icon .icon{color:var(--color-foreground-secondary);height:1rem;width:1rem}.nav-overlay-icon,.toc-header-icon{align-items:center;justify-content:center}.toc-content-icon{height:1.5rem;width:1.5rem}.content-icon-container{display:flex;float:right;gap:.5rem;margin-bottom:1rem;margin-left:1rem;margin-top:1.5rem}.content-icon-container .edit-this-page svg{color:inherit;height:1rem;width:1rem}.sidebar-toggle{display:none;position:absolute}.sidebar-toggle[name=__toc]{left:20px}.sidebar-toggle:checked{left:40px}.overlay{background-color:rgba(0,0,0,.54);height:0;opacity:0;position:fixed;top:0;transition:width 0ms,height 0ms,opacity .25s ease-out;width:0}.sidebar-overlay{z-index:20}.toc-overlay{z-index:40}.sidebar-drawer{transition:left .25s ease-in-out;z-index:30}.toc-drawer{transition:right .25s ease-in-out;z-index:50}#__navigation:checked~.sidebar-overlay{height:100%;opacity:1;width:100%}#__navigation:checked~.page .sidebar-drawer{left:0;top:0}#__toc:checked~.toc-overlay{height:100%;opacity:1;width:100%}#__toc:checked~.page .toc-drawer{right:0;top:0}.back-to-top{background:var(--color-background-primary);border-radius:1rem;box-shadow:0 .2rem .5rem rgba(0,0,0,.05),0 0 1px 0 hsla(220,9%,46%,.502);display:none;font-size:.8125rem;left:0;margin-left:50%;padding:.5rem .75rem .5rem 
.5rem;position:fixed;text-decoration:none;top:1rem;transform:translateX(-50%);z-index:10}.back-to-top svg{fill:currentColor;display:inline-block;height:1rem;width:1rem}.back-to-top span{margin-left:.25rem}.show-back-to-top .back-to-top{align-items:center;display:flex}@media(min-width:97em){html{font-size:110%}}@media(max-width:82em){.toc-content-icon{display:flex}.toc-drawer{border-left:1px solid var(--color-background-muted);height:100vh;position:fixed;right:-15em;top:0}.toc-tree{border-left:none;font-size:var(--toc-font-size--mobile)}.sidebar-drawer{width:calc(50% - 18.5em)}}@media(max-width:67em){.nav-overlay-icon{display:flex}.sidebar-drawer{height:100vh;left:-15em;position:fixed;top:0;width:15em}.toc-header-icon{display:flex}.theme-toggle-content,.toc-content-icon{display:none}.theme-toggle-header{display:block}.mobile-header{align-items:center;display:flex;justify-content:space-between;position:-webkit-sticky;position:sticky;top:0}.mobile-header .header-left,.mobile-header .header-right{display:flex;height:var(--header-height);padding:0 var(--header-padding)}.mobile-header .header-left label,.mobile-header .header-right label{height:100%;-webkit-user-select:none;-moz-user-select:none;user-select:none;width:100%}.nav-overlay-icon .icon,.theme-toggle svg{height:1.25rem;width:1.25rem}:target{scroll-margin-top:var(--header-height)}.back-to-top{top:calc(var(--header-height) + .5rem)}.page{flex-direction:column;justify-content:center}.content{margin-left:auto;margin-right:auto}}@media(max-width:52em){.content{overflow-x:auto;width:100%}}@media(max-width:46em){.content{padding:0 1em}article aside.sidebar{float:none;margin:1rem 0;width:100%}}.admonition,.topic{background:var(--color-admonition-background);border-radius:.2rem;box-shadow:0 .2rem .5rem rgba(0,0,0,.05),0 0 .0625rem rgba(0,0,0,.1);font-size:var(--admonition-font-size);margin:1rem auto;overflow:hidden;padding:0 .5rem .5rem;page-break-inside:avoid}.admonition>:nth-child(2),.topic>:nth-child(2){margin-top:0}.admonition>:last-child,.topic>:last-child{margin-bottom:0}p.admonition-title,p.topic-title{font-size:var(--admonition-title-font-size);font-weight:500;line-height:1.3;margin:0 -.5rem .5rem;padding:.4rem .5rem .4rem 2rem;position:relative}p.admonition-title:before,p.topic-title:before{content:"";height:1rem;left:.5rem;position:absolute;width:1rem}p.admonition-title{background-color:var(--color-admonition-title-background)}p.admonition-title:before{background-color:var(--color-admonition-title);-webkit-mask-image:var(--icon-admonition-default);mask-image:var(--icon-admonition-default);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat}p.topic-title{background-color:var(--color-topic-title-background)}p.topic-title:before{background-color:var(--color-topic-title);-webkit-mask-image:var(--icon-topic-default);mask-image:var(--icon-topic-default);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat}.admonition{border-left:.2rem solid 
var(--color-admonition-title)}.admonition.caution{border-left-color:var(--color-admonition-title--caution)}.admonition.caution>.admonition-title{background-color:var(--color-admonition-title-background--caution)}.admonition.caution>.admonition-title:before{background-color:var(--color-admonition-title--caution);-webkit-mask-image:var(--icon-spark);mask-image:var(--icon-spark)}.admonition.warning{border-left-color:var(--color-admonition-title--warning)}.admonition.warning>.admonition-title{background-color:var(--color-admonition-title-background--warning)}.admonition.warning>.admonition-title:before{background-color:var(--color-admonition-title--warning);-webkit-mask-image:var(--icon-warning);mask-image:var(--icon-warning)}.admonition.danger{border-left-color:var(--color-admonition-title--danger)}.admonition.danger>.admonition-title{background-color:var(--color-admonition-title-background--danger)}.admonition.danger>.admonition-title:before{background-color:var(--color-admonition-title--danger);-webkit-mask-image:var(--icon-spark);mask-image:var(--icon-spark)}.admonition.attention{border-left-color:var(--color-admonition-title--attention)}.admonition.attention>.admonition-title{background-color:var(--color-admonition-title-background--attention)}.admonition.attention>.admonition-title:before{background-color:var(--color-admonition-title--attention);-webkit-mask-image:var(--icon-warning);mask-image:var(--icon-warning)}.admonition.error{border-left-color:var(--color-admonition-title--error)}.admonition.error>.admonition-title{background-color:var(--color-admonition-title-background--error)}.admonition.error>.admonition-title:before{background-color:var(--color-admonition-title--error);-webkit-mask-image:var(--icon-failure);mask-image:var(--icon-failure)}.admonition.hint{border-left-color:var(--color-admonition-title--hint)}.admonition.hint>.admonition-title{background-color:var(--color-admonition-title-background--hint)}.admonition.hint>.admonition-title:before{background-color:var(--color-admonition-title--hint);-webkit-mask-image:var(--icon-question);mask-image:var(--icon-question)}.admonition.tip{border-left-color:var(--color-admonition-title--tip)}.admonition.tip>.admonition-title{background-color:var(--color-admonition-title-background--tip)}.admonition.tip>.admonition-title:before{background-color:var(--color-admonition-title--tip);-webkit-mask-image:var(--icon-info);mask-image:var(--icon-info)}.admonition.important{border-left-color:var(--color-admonition-title--important)}.admonition.important>.admonition-title{background-color:var(--color-admonition-title-background--important)}.admonition.important>.admonition-title:before{background-color:var(--color-admonition-title--important);-webkit-mask-image:var(--icon-flame);mask-image:var(--icon-flame)}.admonition.note{border-left-color:var(--color-admonition-title--note)}.admonition.note>.admonition-title{background-color:var(--color-admonition-title-background--note)}.admonition.note>.admonition-title:before{background-color:var(--color-admonition-title--note);-webkit-mask-image:var(--icon-pencil);mask-image:var(--icon-pencil)}.admonition.seealso{border-left-color:var(--color-admonition-title--seealso)}.admonition.seealso>.admonition-title{background-color:var(--color-admonition-title-background--seealso)}.admonition.seealso>.admonition-title:before{background-color:var(--color-admonition-title--seealso);-webkit-mask-image:var(--icon-info);mask-image:var(--icon-info)}.admonition.admonition-todo{border-left-color:var(--color-admonition-titl
e--admonition-todo)}.admonition.admonition-todo>.admonition-title{background-color:var(--color-admonition-title-background--admonition-todo)}.admonition.admonition-todo>.admonition-title:before{background-color:var(--color-admonition-title--admonition-todo);-webkit-mask-image:var(--icon-pencil);mask-image:var(--icon-pencil)}.admonition-todo>.admonition-title{text-transform:uppercase}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) dd{margin-left:2rem}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) dd>:first-child{margin-top:.125rem}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .field-list,dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) dd>:last-child{margin-bottom:.75rem}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .field-list>dt{font-size:var(--font-size--small);text-transform:uppercase}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .field-list dd:empty{margin-bottom:.5rem}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .field-list dd>ul{margin-left:-1.2rem}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .field-list dd>ul>li>p:nth-child(2){margin-top:0}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .field-list dd>ul>li>p+p:last-child:empty{margin-bottom:0;margin-top:0}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple)>dt{color:var(--color-api-overall)}.sig:not(.sig-inline){background:var(--color-api-background);border-radius:.25rem;font-family:var(--font-stack--monospace);font-size:var(--api-font-size);font-weight:700;margin-left:-.25rem;margin-right:-.25rem;padding:.25rem .5rem .25rem 3em;text-indent:-2.5em;transition:background .1s ease-out}.sig:not(.sig-inline):hover{background:var(--color-api-background-hover)}.sig:not(.sig-inline) a.reference .viewcode-link{font-weight:400;width:3.5rem}.sig:not(.sig-inline) span.pre{overflow-wrap:anywhere}em.property{font-style:normal}em.property:first-child{color:var(--color-api-keyword)}.sig-name{color:var(--color-api-name)}.sig-prename{color:var(--color-api-pre-name);font-weight:400}.sig-paren{color:var(--color-api-paren)}.sig-param{font-style:normal}.versionmodified{font-style:italic}div.deprecated p,div.versionadded p,div.versionchanged p{margin-bottom:.125rem;margin-top:.125rem}.viewcode-back,.viewcode-link{float:right;text-align:right}.line-block{margin-bottom:.75rem;margin-top:.5rem}.line-block .line-block{margin-bottom:0;margin-top:0;padding-left:1rem}.code-block-caption,article p.caption,table>caption{font-size:var(--font-size--small);text-align:center}.toctree-wrapper.compound .caption,.toctree-wrapper.compound :not(.caption)>.caption-text{font-size:var(--font-size--small);margin-bottom:0;text-align:initial;text-transform:uppercase}.toctree-wrapper.compound>ul{margin-bottom:0;margin-top:0}.sig-inline,code.literal{background:var(--color-inline-code-background);border-radius:.2em;font-size:var(--font-size--small--2);overflow-wrap:break-word;padding:.1em .2em}p .sig-inline,p code.literal{border:1px solid var(--color-background-border)}.sig-inline{font-family:var(--font-stack--monospace)}div[class*=" highlight-"],div[class^=highlight-]{display:flex;margin:1em 0}div[class*=" highlight-"] .table-wrapper,div[class^=highlight-] 
.table-wrapper,pre{margin:0;padding:0}pre{overflow:auto}article[role=main] .highlight pre{line-height:1.5}.highlight pre,pre.literal-block{font-size:var(--code-font-size);padding:.625rem .875rem}pre.literal-block{background-color:var(--color-code-background);border-radius:.2rem;color:var(--color-code-foreground);margin-bottom:1rem;margin-top:1rem}.highlight{border-radius:.2rem;width:100%}.highlight .gp,.highlight span.linenos{pointer-events:none;-webkit-user-select:none;-moz-user-select:none;user-select:none}.highlight .hll{display:block;margin-left:-.875rem;margin-right:-.875rem;padding-left:.875rem;padding-right:.875rem}.code-block-caption{background-color:var(--color-code-background);border-bottom:1px solid;border-radius:.25rem;border-bottom-left-radius:0;border-bottom-right-radius:0;border-color:var(--color-background-border);color:var(--color-code-foreground);display:flex;font-weight:300;padding:.625rem .875rem}.code-block-caption+div[class]{margin-top:0}.code-block-caption+div[class] pre{border-top-left-radius:0;border-top-right-radius:0}.highlighttable{display:block;width:100%}.highlighttable tbody{display:block}.highlighttable tr{display:flex}.highlighttable td.linenos{background-color:var(--color-code-background);border-bottom-left-radius:.2rem;border-top-left-radius:.2rem;color:var(--color-code-foreground);padding:.625rem 0 .625rem .875rem}.highlighttable .linenodiv{box-shadow:-.0625rem 0 var(--color-foreground-border) inset;font-size:var(--code-font-size);padding-right:.875rem}.highlighttable td.code{display:block;flex:1;overflow:hidden;padding:0}.highlighttable td.code .highlight{border-bottom-left-radius:0;border-top-left-radius:0}.highlight span.linenos{box-shadow:-.0625rem 0 var(--color-foreground-border) inset;display:inline-block;margin-right:.875rem;padding-left:0;padding-right:.875rem}.footnote-reference{font-size:var(--font-size--small--4);vertical-align:super}dl.footnote.brackets{color:var(--color-foreground-secondary);display:grid;font-size:var(--font-size--small);grid-template-columns:-webkit-max-content auto;grid-template-columns:max-content auto}dl.footnote.brackets dt{margin:0}dl.footnote.brackets dt>.fn-backref{margin-left:.25rem}dl.footnote.brackets dt:after{content:":"}dl.footnote.brackets dt .brackets:before{content:"["}dl.footnote.brackets dt .brackets:after{content:"]"}dl.footnote.brackets dd{margin:0;padding:0 1rem}aside.footnote{color:var(--color-foreground-secondary);font-size:var(--font-size--small)}aside.footnote>span,div.citation>span{float:left;font-weight:500;padding-right:.25rem}aside.footnote>p,div.citation>p{margin-left:2rem}img{box-sizing:border-box;height:auto;max-width:100%}article .figure,article figure{border-radius:.2rem;margin:0}article .figure :last-child,article figure :last-child{margin-bottom:0}article .align-left{clear:left;float:left;margin:0 1rem 1rem}article .align-right{clear:right;float:right;margin:0 1rem 1rem}article .align-center,article .align-default{display:block;margin-left:auto;margin-right:auto;text-align:center}article table.align-default{display:table;text-align:initial}.domainindex-jumpbox,.genindex-jumpbox{border-bottom:1px solid var(--color-background-border);border-top:1px solid var(--color-background-border);padding:.25rem}.domainindex-section h2,.genindex-section h2{margin-bottom:.5rem;margin-top:.75rem}.domainindex-section ul,.genindex-section ul{margin-bottom:0;margin-top:0}ol,ul{margin-bottom:1rem;margin-top:1rem;padding-left:1.2rem}ol li>p:first-child,ul 
li>p:first-child{margin-bottom:.25rem;margin-top:.25rem}ol li>p:last-child,ul li>p:last-child{margin-top:.25rem}ol li>ol,ol li>ul,ul li>ol,ul li>ul{margin-bottom:.5rem;margin-top:.5rem}ol.arabic{list-style:decimal}ol.loweralpha{list-style:lower-alpha}ol.upperalpha{list-style:upper-alpha}ol.lowerroman{list-style:lower-roman}ol.upperroman{list-style:upper-roman}.simple li>ol,.simple li>ul,.toctree-wrapper li>ol,.toctree-wrapper li>ul{margin-bottom:0;margin-top:0}.field-list dt,.option-list dt,dl.footnote dt,dl.glossary dt,dl.simple dt,dl:not([class]) dt{font-weight:500;margin-top:.25rem}.field-list dt+dt,.option-list dt+dt,dl.footnote dt+dt,dl.glossary dt+dt,dl.simple dt+dt,dl:not([class]) dt+dt{margin-top:0}.field-list dt .classifier:before,.option-list dt .classifier:before,dl.footnote dt .classifier:before,dl.glossary dt .classifier:before,dl.simple dt .classifier:before,dl:not([class]) dt .classifier:before{content:":";margin-left:.2rem;margin-right:.2rem}.field-list dd ul,.field-list dd>p:first-child,.option-list dd ul,.option-list dd>p:first-child,dl.footnote dd ul,dl.footnote dd>p:first-child,dl.glossary dd ul,dl.glossary dd>p:first-child,dl.simple dd ul,dl.simple dd>p:first-child,dl:not([class]) dd ul,dl:not([class]) dd>p:first-child{margin-top:.125rem}.field-list dd ul,.option-list dd ul,dl.footnote dd ul,dl.glossary dd ul,dl.simple dd ul,dl:not([class]) dd ul{margin-bottom:.125rem}.math-wrapper{overflow-x:auto;width:100%}div.math{position:relative;text-align:center}div.math .headerlink,div.math:focus .headerlink{display:none}div.math:hover .headerlink{display:inline-block}div.math span.eqno{position:absolute;right:.5rem;top:50%;transform:translateY(-50%);z-index:1}abbr[title]{cursor:help}.problematic{color:var(--color-problematic)}kbd:not(.compound){background-color:var(--color-background-secondary);border:1px solid var(--color-foreground-border);border-radius:.2rem;box-shadow:0 .0625rem 0 rgba(0,0,0,.2),inset 0 0 0 .125rem var(--color-background-primary);color:var(--color-foreground-primary);display:inline-block;font-size:var(--font-size--small--3);margin:0 .2rem;padding:0 .2rem;vertical-align:text-bottom}blockquote{background:var(--color-background-secondary);border-left:4px solid var(--color-background-border);margin-left:0;margin-right:0;padding:.5rem 1rem}blockquote .attribution{font-weight:600;text-align:right}blockquote.highlights,blockquote.pull-quote{font-size:1.25em}blockquote.epigraph,blockquote.pull-quote{border-left-width:0;border-radius:.5rem}blockquote.highlights{background:transparent;border-left-width:0}p .reference img{vertical-align:middle}p.rubric{font-size:1.125em;font-weight:700;line-height:1.25}dd p.rubric{font-size:var(--font-size--small);font-weight:inherit;line-height:inherit;text-transform:uppercase}article .sidebar{background-color:var(--color-background-secondary);border:1px solid var(--color-background-border);border-radius:.2rem;clear:right;float:right;margin-left:1rem;margin-right:0;width:30%}article .sidebar>*{padding-left:1rem;padding-right:1rem}article .sidebar>ol,article .sidebar>ul{padding-left:2.2rem}article .sidebar .sidebar-title{border-bottom:1px solid var(--color-background-border);font-weight:500;margin:0;padding:.5rem 1rem}.table-wrapper{margin-bottom:.5rem;margin-top:1rem;overflow-x:auto;padding:.2rem .2rem .75rem;width:100%}table.docutils{border-collapse:collapse;border-radius:.2rem;border-spacing:0;box-shadow:0 .2rem .5rem rgba(0,0,0,.05),0 0 .0625rem rgba(0,0,0,.1)}table.docutils 
th{background:var(--color-table-header-background)}table.docutils td,table.docutils th{border-bottom:1px solid var(--color-table-border);border-left:1px solid var(--color-table-border);border-right:1px solid var(--color-table-border);padding:0 .25rem}table.docutils td p,table.docutils th p{margin:.25rem}table.docutils td:first-child,table.docutils th:first-child{border-left:none}table.docutils td:last-child,table.docutils th:last-child{border-right:none}table.docutils td.text-left,table.docutils th.text-left{text-align:left}table.docutils td.text-right,table.docutils th.text-right{text-align:right}table.docutils td.text-center,table.docutils th.text-center{text-align:center}:target{scroll-margin-top:.5rem}@media(max-width:67em){:target{scroll-margin-top:calc(.5rem + var(--header-height))}section>span:target{scroll-margin-top:calc(.8rem + var(--header-height))}}.headerlink{font-weight:100;-webkit-user-select:none;-moz-user-select:none;user-select:none}.code-block-caption>.headerlink,dl dt>.headerlink,figcaption p>.headerlink,h1>.headerlink,h2>.headerlink,h3>.headerlink,h4>.headerlink,h5>.headerlink,h6>.headerlink,p.caption>.headerlink,table>caption>.headerlink{margin-left:.5rem;visibility:hidden}.code-block-caption:hover>.headerlink,dl dt:hover>.headerlink,figcaption p:hover>.headerlink,h1:hover>.headerlink,h2:hover>.headerlink,h3:hover>.headerlink,h4:hover>.headerlink,h5:hover>.headerlink,h6:hover>.headerlink,p.caption:hover>.headerlink,table>caption:hover>.headerlink{visibility:visible}.code-block-caption>.toc-backref,dl dt>.toc-backref,figcaption p>.toc-backref,h1>.toc-backref,h2>.toc-backref,h3>.toc-backref,h4>.toc-backref,h5>.toc-backref,h6>.toc-backref,p.caption>.toc-backref,table>caption>.toc-backref{color:inherit;-webkit-text-decoration-line:none;text-decoration-line:none}figure:hover>figcaption>p>.headerlink,table:hover>caption>.headerlink{visibility:visible}:target>h1:first-of-type,:target>h2:first-of-type,:target>h3:first-of-type,:target>h4:first-of-type,:target>h5:first-of-type,:target>h6:first-of-type,span:target~h1:first-of-type,span:target~h2:first-of-type,span:target~h3:first-of-type,span:target~h4:first-of-type,span:target~h5:first-of-type,span:target~h6:first-of-type{background-color:var(--color-highlight-on-target)}:target>h1:first-of-type code.literal,:target>h2:first-of-type code.literal,:target>h3:first-of-type code.literal,:target>h4:first-of-type code.literal,:target>h5:first-of-type code.literal,:target>h6:first-of-type code.literal,span:target~h1:first-of-type code.literal,span:target~h2:first-of-type code.literal,span:target~h3:first-of-type code.literal,span:target~h4:first-of-type code.literal,span:target~h5:first-of-type code.literal,span:target~h6:first-of-type code.literal{background-color:transparent}.literal-block-wrapper:target .code-block-caption,.this-will-duplicate-information-and-it-is-still-useful-here li :target,figure:target,table:target>caption{background-color:var(--color-highlight-on-target)}dt:target{background-color:var(--color-highlight-on-target)!important}.footnote-reference:target,.footnote>dt:target+dd{background-color:var(--color-highlight-on-target)}.guilabel{background-color:var(--color-guilabel-background);border:1px solid var(--color-guilabel-border);border-radius:.5em;color:var(--color-guilabel-text);font-size:.9em;padding:0 .3em}footer{display:flex;flex-direction:column;font-size:var(--font-size--small);margin-top:2rem}.bottom-of-page{align-items:center;border-top:1px solid 
var(--color-background-border);color:var(--color-foreground-secondary);display:flex;justify-content:space-between;line-height:1.5;margin-top:1rem;padding-bottom:1rem;padding-top:1rem}@media(max-width:46em){.bottom-of-page{flex-direction:column-reverse;gap:.25rem;text-align:center}}.bottom-of-page .left-details{font-size:var(--font-size--small)}.bottom-of-page .right-details{display:flex;flex-direction:column;gap:.25rem;text-align:right}.bottom-of-page .icons{display:flex;font-size:1rem;gap:.25rem;justify-content:flex-end}.bottom-of-page .icons a{text-decoration:none}.bottom-of-page .icons img,.bottom-of-page .icons svg{font-size:1.125rem;height:1em;width:1em}.related-pages a{align-items:center;display:flex;text-decoration:none}.related-pages a:hover .page-info .title{color:var(--color-link);text-decoration:underline;-webkit-text-decoration-color:var(--color-link-underline);text-decoration-color:var(--color-link-underline)}.related-pages a svg.furo-related-icon,.related-pages a svg.furo-related-icon>use{color:var(--color-foreground-border);flex-shrink:0;height:.75rem;margin:0 .5rem;width:.75rem}.related-pages a.next-page{clear:right;float:right;max-width:50%;text-align:right}.related-pages a.prev-page{clear:left;float:left;max-width:50%}.related-pages a.prev-page svg{transform:rotate(180deg)}.page-info{display:flex;flex-direction:column;overflow-wrap:anywhere}.next-page .page-info{align-items:flex-end}.page-info .context{align-items:center;color:var(--color-foreground-muted);display:flex;font-size:var(--font-size--small);padding-bottom:.1rem;text-decoration:none}ul.search{list-style:none;padding-left:0}ul.search li{border-bottom:1px solid var(--color-background-border);padding:1rem 0}[role=main] .highlighted{background-color:var(--color-highlighted-background);color:var(--color-highlighted-text)}.sidebar-brand{display:flex;flex-direction:column;flex-shrink:0;padding:var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal);text-decoration:none}.sidebar-brand-text{color:var(--color-sidebar-brand-text);font-size:1.5rem;overflow-wrap:break-word}.sidebar-brand-text,.sidebar-logo-container{margin:var(--sidebar-item-spacing-vertical) 0}.sidebar-logo{display:block;margin:0 auto;max-width:100%}.sidebar-search-container{align-items:center;background:var(--color-sidebar-search-background);display:flex;margin-top:var(--sidebar-search-space-above);position:relative}.sidebar-search-container:focus-within,.sidebar-search-container:hover{background:var(--color-sidebar-search-background--focus)}.sidebar-search-container:before{background-color:var(--color-sidebar-search-icon);content:"";height:var(--sidebar-search-icon-size);left:var(--sidebar-item-spacing-horizontal);-webkit-mask-image:var(--icon-search);mask-image:var(--icon-search);position:absolute;width:var(--sidebar-search-icon-size)}.sidebar-search{background:transparent;border:none;border-bottom:1px solid var(--color-sidebar-search-border);border-top:1px solid var(--color-sidebar-search-border);box-sizing:border-box;color:var(--color-sidebar-search-foreground);padding:var(--sidebar-search-input-spacing-vertical) var(--sidebar-search-input-spacing-horizontal) var(--sidebar-search-input-spacing-vertical) calc(var(--sidebar-item-spacing-horizontal) + var(--sidebar-search-input-spacing-horizontal) + 
var(--sidebar-search-icon-size));width:100%;z-index:10}.sidebar-search:focus{outline:none}.sidebar-search::-moz-placeholder{font-size:var(--sidebar-search-input-font-size)}.sidebar-search::placeholder{font-size:var(--sidebar-search-input-font-size)}#searchbox .highlight-link{margin:0;padding:var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal) 0;text-align:center}#searchbox .highlight-link a{color:var(--color-sidebar-search-icon);font-size:var(--font-size--small--2)}.sidebar-tree{font-size:var(--sidebar-item-font-size);margin-bottom:var(--sidebar-item-spacing-vertical);margin-top:var(--sidebar-tree-space-above)}.sidebar-tree ul{display:flex;flex-direction:column;list-style:none;margin-bottom:0;margin-top:0;padding:0}.sidebar-tree li{margin:0;position:relative}.sidebar-tree li>ul{margin-left:var(--sidebar-item-spacing-horizontal)}.sidebar-tree .icon,.sidebar-tree .reference{color:var(--color-sidebar-link-text)}.sidebar-tree .reference{box-sizing:border-box;display:inline-block;height:100%;line-height:var(--sidebar-item-line-height);overflow-wrap:anywhere;padding:var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal);text-decoration:none;width:100%}.sidebar-tree .reference:hover{background:var(--color-sidebar-item-background--hover)}.sidebar-tree .reference.external:after{color:var(--color-sidebar-link-text);content:url("data:image/svg+xml;charset=utf-8,%3Csvg width='12' height='12' xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24' stroke-width='1.5' stroke='%23607D8B' fill='none' stroke-linecap='round' stroke-linejoin='round'%3E%3Cpath d='M0 0h24v24H0z' stroke='none'/%3E%3Cpath d='M11 7H6a2 2 0 0 0-2 2v9a2 2 0 0 0 2 2h9a2 2 0 0 0 2-2v-5M10 14 20 4M15 4h5v5'/%3E%3C/svg%3E");margin:0 .25rem;vertical-align:middle}.sidebar-tree .current-page>.reference{font-weight:700}.sidebar-tree label{align-items:center;cursor:pointer;display:flex;height:var(--sidebar-item-height);justify-content:center;position:absolute;right:0;top:0;-webkit-user-select:none;-moz-user-select:none;user-select:none;width:var(--sidebar-expander-width)}.sidebar-tree .caption,.sidebar-tree :not(.caption)>.caption-text{color:var(--color-sidebar-caption-text);font-size:var(--sidebar-caption-font-size);font-weight:700;margin:var(--sidebar-caption-space-above) 0 0 0;padding:var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal);text-transform:uppercase}.sidebar-tree li.has-children>.reference{padding-right:var(--sidebar-expander-width)}.sidebar-tree .toctree-l1>.reference,.sidebar-tree .toctree-l1>label .icon{color:var(--color-sidebar-link-text--top-level)}.sidebar-tree label{background:var(--color-sidebar-item-expander-background)}.sidebar-tree label:hover{background:var(--color-sidebar-item-expander-background--hover)}.sidebar-tree .current>.reference{background:var(--color-sidebar-item-background--current)}.sidebar-tree .current>.reference:hover{background:var(--color-sidebar-item-background--hover)}.toctree-checkbox{display:none;position:absolute}.toctree-checkbox~ul{display:none}.toctree-checkbox~label .icon svg{transform:rotate(90deg)}.toctree-checkbox:checked~ul{display:block}.toctree-checkbox:checked~label .icon 
svg{transform:rotate(-90deg)}.toc-title-container{padding:var(--toc-title-padding);padding-top:var(--toc-spacing-vertical)}.toc-title{color:var(--color-toc-title-text);font-size:var(--toc-title-font-size);padding-left:var(--toc-spacing-horizontal);text-transform:uppercase}.no-toc{display:none}.toc-tree-container{padding-bottom:var(--toc-spacing-vertical)}.toc-tree{border-left:1px solid var(--color-background-border);font-size:var(--toc-font-size);line-height:1.3;padding-left:calc(var(--toc-spacing-horizontal) - var(--toc-item-spacing-horizontal))}.toc-tree>ul>li:first-child{padding-top:0}.toc-tree>ul>li:first-child>ul{padding-left:0}.toc-tree>ul>li:first-child>a{display:none}.toc-tree ul{list-style-type:none;margin-bottom:0;margin-top:0;padding-left:var(--toc-item-spacing-horizontal)}.toc-tree li{padding-top:var(--toc-item-spacing-vertical)}.toc-tree li.scroll-current>.reference{color:var(--color-toc-item-text--active);font-weight:700}.toc-tree .reference{color:var(--color-toc-item-text);overflow-wrap:anywhere;text-decoration:none}.toc-scroll{max-height:100vh;overflow-y:scroll}.contents:not(.this-will-duplicate-information-and-it-is-still-useful-here){background:rgba(255,0,0,.25);color:var(--color-problematic)}.contents:not(.this-will-duplicate-information-and-it-is-still-useful-here):before{content:"ERROR: Adding a table of contents in Furo-based documentation is unnecessary, and does not work well with existing styling.Add a 'this-will-duplicate-information-and-it-is-still-useful-here' class, if you want an escape hatch."}.text-align\:left>p{text-align:left}.text-align\:center>p{text-align:center}.text-align\:right>p{text-align:right} +/*# sourceMappingURL=furo.css.map*/ \ No newline at end of file diff --git a/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/styles/furo.css.map b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/styles/furo.css.map new file mode 100644 index 0000000000000000000000000000000000000000..92af40757204ca6d131975194cf0dd2271695b4a --- /dev/null +++ b/venv/lib/python3.10/site-packages/bitblas/3rdparty/cutlass/python/docs/_static/styles/furo.css.map @@ -0,0 +1 @@ 
+{"version":3,"file":"styles/furo.css","mappings":"AAAA,2EAA2E,CAU3E,KAEE,6BAA8B,CAD9B,gBAEF,CASA,KACE,QACF,CAMA,KACE,aACF,CAOA,GACE,aAAc,CACd,cACF,CAUA,GACE,sBAAuB,CACvB,QAAS,CACT,gBACF,CAOA,IACE,+BAAiC,CACjC,aACF,CASA,EACE,4BACF,CAOA,YACE,kBAAmB,CACnB,yBAA0B,CAC1B,gCACF,CAMA,SAEE,kBACF,CAOA,cAGE,+BAAiC,CACjC,aACF,CAeA,QAEE,aAAc,CACd,aAAc,CACd,iBAAkB,CAClB,uBACF,CAEA,IACE,aACF,CAEA,IACE,SACF,CASA,IACE,iBACF,CAUA,sCAKE,mBAAoB,CACpB,cAAe,CACf,gBAAiB,CACjB,QACF,CAOA,aAEE,gBACF,CAOA,cAEE,mBACF,CAMA,gDAIE,yBACF,CAMA,wHAIE,iBAAkB,CAClB,SACF,CAMA,4GAIE,6BACF,CAMA,SACE,0BACF,CASA,OACE,qBAAsB,CACtB,aAAc,CACd,aAAc,CACd,cAAe,CACf,SAAU,CACV,kBACF,CAMA,SACE,uBACF,CAMA,SACE,aACF,CAOA,6BAEE,qBAAsB,CACtB,SACF,CAMA,kFAEE,WACF,CAOA,cACE,4BAA6B,CAC7B,mBACF,CAMA,yCACE,uBACF,CAOA,6BACE,yBAA0B,CAC1B,YACF,CASA,QACE,aACF,CAMA,QACE,iBACF,CAiBA,kBACE,YACF,CCvVA,aAcE,kEACE,uBAOF,WACE,iDAMF,gCACE,wBAEF,qCAEE,uBADA,uBACA,CAEF,SACE,wBAtBA,CCpBJ,iBAOE,6BAEA,mBANA,qBAEA,sBACA,0BAFA,oBAHA,4BAOA,6BANA,mBAOA,CAEF,gBACE,aCPF,KCGE,mHAEA,wGAGA,wBAAyB,CACzB,wBAAyB,CACzB,4BAA6B,CAC7B,yBAA0B,CAC1B,2BAA4B,CAG5B,sDAAuD,CACvD,gDAAiD,CACjD,wDAAyD,CAGzD,0CAA2C,CAC3C,gDAAiD,CACjD,gDAAiD,CAKjD,gCAAiC,CACjC,sCAAuC,CAGvC,2CAA4C,CAG5C,uCAAwC,CChCxC,+FAGA,uBAAwB,CAGxB,iCAAkC,CAClC,kCAAmC,CAEnC,+BAAgC,CAChC,sCAAuC,CACvC,sCAAuC,CACvC,qGAIA,mDAAoD,CAEpD,mCAAoC,CACpC,8CAA+C,CAC/C,gDAAiD,CACjD,kCAAmC,CACnC,6DAA8D,CAG9D,6BAA8B,CAC9B,6BAA8B,CAC9B,+BAAgC,CAChC,kCAAmC,CACnC,kCAAmC,CCPjC,ukBCYA,srCAZF,kaCVA,mLAOA,oTAWA,2UAaA,0CACA,gEACA,0CAGA,gEAUA,yCACA,+DAGA,4CACA,CACA,iEAGA,sGACA,uCACA,4DAGA,sCACA,2DAEA,4CACA,kEACA,oGACA,CAEA,0GACA,+CAGA,+MAOA,+EACA,wCAIA,4DACA,sEACA,kEACA,sEACA,gDAGA,+DACA,0CACA,gEACA,gGACA,CAGA,2DACA,qDAGA,0CACA,8CACA,oDACA,oDL7GF,iCAEA,iEAME,oCKyGA,yDAIA,sCACA,kCACA,sDAGA,0CACA,kEACA,oDAEA,sDAGA,oCACA,oEAIA,CAGA,yDAGA,qDACA,oDAGA,6DAIA,iEAGA,2DAEA,2DL9IE,4DAEA,gEAIF,gEKgGA,gFAIA,oNAOA,qDAEA,gFAIA,4DAIA,oEAMA,yEAIA,6DACA,0DAGA,uDAGA,qDAEA,wDLpII,6DAEA,yDACE,2DAMN,uCAIA,yCACE,8CAGF,sDMjDA,6DAKA,oCAIA,4CACA,kBAGF,sBAMA,2BAME,qCAGA,qCAEA,iCAEA,+BAEA,mCAEA,qCAIA,CACA,gCACA,gDAKA,kCAIA,6BAEA,0CAQA,kCAIF,8BAGE,8BACA,uCAGF,sCAKE,kCAEA,sDACA,uEAGE,sDACA,gGACF,wCAGI,sBACA,yHCzEJ,2BACA,qCAGF,sEAGE,kEAGA,sHAGA,2IACE,8BACA,8BAOF,uCAEA,wEAGA,sDACA,iCAKA,CAEF,qCAEE,sDACA,gCACA,gEAKA,+CAOE,sBACA,gEAGA,GAYF,yLACA,gDAGA,mBAEA,wCACA,wCAGF,CAEE,iCAGF,wBACE,mBAIF,oBAFE,eAEF,CAJE,gBAEA,CAMA,mBACA,mBAGA,mDAIA,YACA,mBAEA,CACA,kBAGF,OAJE,kBAQA,CAJF,GACE,aAGA,IACA,mCACA,qBAEF,IACE,oBAEA,aACA,CAFA,WAEA,GAEE,oBAKJ,CAPE,gBAOF,aACE,+CAGA,UAHA,kCAGA,4BACA,GAEA,uBACA,CAHA,yBAEA,CACA,yDAGF,kDAEE,SACA,8BAEA,iEAGE,yDACA,sEAEA,iEAEE,yHAKN,kDAMA,0DAIE,CANA,oBAMA,0GAOA,aAEF,CAHE,YAGF,4HAWE,+CACE,iCAIJ,0CAGE,CALE,qCAEJ,CAHI,WAMF,SAIA,0CAIA,CANF,qCAME,mBACA,gBACA,gBAIA,+CAEE,CAIF,kDAGF,CAPI,8BAGJ,CAKE,YACF,CAbE,2BAEA,CAHA,WAYF,UAEA,yBACE,kBAIA,iEAKA,iCAGA,mDAEA,mBACF,OACE,iBAQA,0CAIA,CAPA,6DAGA,CALF,qBAEE,CAOA,qCAEE,CAGA,eAHA,sBAGA,gCAKF,qBACE,WACA,aACA,sCAEA,mBAOJ,6BASE,kCACA,CAHA,sBACA,aACA,CARA,uBAGA,gBAEA,MAIA,6BAEA,yBACA,2DAEA,sBAGA,8BACA,CANA,wBAMA,2BAEE,YACA,sBACA,WAEF,CAFE,UAEF,eAeF,kBAEE,CAhBE,qDAGA,qCAOJ,CAEI,YAEJ,CAJA,2BAEI,CAIF,eACE,qBACF,4CAIE,uBACA,sBACF,cACE,CAFA,aACF,CAEE,kBADA,kBACA,yBAGF,oCACE,6DAMF,qDAGE,CC1VY,8BDgWd,oCAEA,uDAEA,CACE,8CAIA,gCAEA,YACA,8CACA,CAEA,oCAGE,CAHF,oCAGE,mBAEA,mDADA,YADA,qBACA,WACA,sBAEE,WACA,uDAEN,eAFM,YAEN,iDAGE,uCAIA,YAGF,+CAKE,kBACA,CALA,sBAKA,mBACF,aACE,aACA,yBAEJ,YAGI,CAHJ,YAOE,SACE,CAFJ,kBACE,CAHE,gBAEJ,CAHI,iBAKA,6CAIA,aACA,YEhaJ,4BAEE,aADA,iBACA,6BAEA,kCAEA,SACA,UAIA,gCACA,CALA,SAEA,SAEA,CAJA,0EAEA,CAFA,OAKA,CAGA,mD
ACE,iBAGF,gCACE,CADF,UACE,aAEJ,iCACE,CADF,UAEE,wCAEA,WACA,WAFA,UAEA,6CAIA,yCACA,WAGA,WAJA,UAIA,kCACE,OACA,CAFF,KAEE,cAQF,0CACE,CAFF,kBACA,CACE,wEACA,CARA,YACA,CAKF,mBAFF,OAII,eACA,CAJF,iCAJE,cAGJ,CANI,oBAEA,CAKF,SAIE,2BADA,UACA,kBAGF,sCACA,CAFF,WACE,WACA,qCACE,gCACA,2EACA,sDAKJ,aACE,mDAII,CAJJ,6CAII,kEACA,iBACE,iDACA,+CACE,aACA,WADA,+BACA,uEANN,YACE,mDAEE,kBACA,CADA,2CADF,uCACE,MACA,0DACE,yCACA,qGALJ,oCACA,uCACE,CAFF,UAEE,uEACA,+CACE,oDACA,6DANN,kCACE,kCACA,gBADA,UACA,yBACE,wDACA,cADA,UACA,qBACE,6CACA,yFALJ,sCACA,CAEE,gBACE,CAHJ,gBAGI,sBAHJ,uBACE,4DACA,4CACE,iDAJJ,2CACA,CADA,gBAEE,gBAGE,sBALJ,+BAII,iBAFF,gDACA,WACE,YADF,uCACE,6EACA,2BANN,8CACE,kDACA,0CACE,8BACA,yFACE,sBACA,sFALJ,mEACA,sBACE,kEACA,6EACE,uCACA,kEALJ,qGAEE,kEACA,6EACE,uCACA,kEALJ,8CACA,uDACE,sEACA,2EACE,sCACA,iEALJ,mGACA,qCACE,oDACA,0DACE,6GACA,gDAGR,yDCpEA,sEACE,CACA,6GACE,gEACF,iGAIF,wFACE,qDAGA,mGAEE,2CAEF,4FACE,gCACF,wGACE,8DAEE,6FAIA,iJAKN,6GACE,gDAKF,yDACA,qCAGA,6BACA,kBACA,qDAKA,oCAEA,+DAGA,2CAGE,oDAIA,oEAEE,qBAGJ,wDAIA,uCAEE,kEAEF,CACF,6CAEE,uDAEA,oCAIF,4BACE,6BAEA,gEAEE,+CAIF,0EC9FA,sDAGE,+DCLJ,sCAGE,8BAKA,wJAIE,gBACA,yGCZF,mBAQA,2MAIA,oBAOF,wGAKE,iCAEE,CAFF,wBAEE,8GAWF,mBAEE,2GAMA,mBAEA,6HAOF,YAGA,mIAOE,gBADA,YACA,4FAOF,8BACA,uBAYA,sCAEE,CAFF,qBARA,wCAEA,CAHA,8BACA,CAFA,eACA,CAGA,mBAEA,sBAEA,kDAEA,CAEE,kCACE,6BACA,4CAMJ,kDAGA,eAIA,6CACE,mCACA,0CACA,8BAEA,sCACA,cAEF,+BACE,CAHA,eAGA,YACA,4BACA,gEAGF,0DAME,sBAFA,kBAGE,+BACA,4BAIJ,aACE,oBACA,CAFF,gBAEE,yBAEA,eACA,CApHsB,YAmHtB,CACA,sECpIF,mDACA,2FAMA,iCAGA,0FAEE,eACA,CAFF,YAEE,0BACE,8CAEF,mBAIE,qCACE,CACF,yBADE,iBACF,8BAGJ,+CAKF,aACE,wCACA,kDAEF,YAEE,CAFF,YAEE,CClCA,mFDwCA,QCzCF,UAGE,CAFA,IACA,aACA,mCAGA,eACE,kCAGA,uDAGF,mBAKA,6CAGE,CALA,mBAEF,CAGE,kCAEF,CARE,kBACA,CAFA,eASF,YAEE,mBACA,CAHF,UAGE,wCC7BJ,oBDkCE,8CAEE,iBCpCJ,iBACE,wDACA,gEASE,6CCLF,CDIE,uBACA,CALF,oBACE,4BAEF,8BCAE,2CAEE,CALJ,kCAGE,CDHF,aAGA,eACE,CAJF,uBCKI,gCAEF,gDAGA,kDAGE,iBAIF,cADF,UACE,uBAEA,iCAEA,wCAEA,6CAEA,CASE,+BASJ,CAZE,4BAGE,CATF,kCAMA,kCAYF,4BACE,2DAEA,CAHF,+BACE,CADF,qBAGE,2GAGA,wIAEE,CAFF,8EAEE,qBACA,oCAGF,6RAIA,sGACE,oDChEJ,WAEF,yBACE,QACA,eAEA,gBAEE,uCAGA,CALF,iCAKE,uCAGA,0BACA,CACA,oBACA,iCClBJ,gBACE,KAGF,qBACE,YAGF,CAHE,cAGF,gCAEE,mBACA,iEAEA,oCACA,wCAEA,sBACA,WAEA,CAFA,YAEA,8EAEA,mCAFA,iBAEA,6BAIA,wEAKA,sDAIE,CARF,mDAIA,CAIE,cAEF,8CAIA,oBAFE,iBAEF,8CAGE,eAEF,CAFE,YAEF,OAEE,kBAGJ,CAJI,eACA,CAFF,mBAKF,yCCjDE,oBACA,CAFA,iBAEA,uCAKE,iBACA,qCAGA,mBCZJ,CDWI,gBCXJ,6BAEE,eACA,sBAGA,eAEA,sBACA,oDACA,iGAMA,gBAFE,YAEF,8FAME,iJClBF,YACA,gNAUE,6BAEF,oTAcI,kBACF,gHAIA,qBACE,eACF,qDACE,kBACF,6DACE,4BCxCJ,oBAEF,qCAEI,+CAGF,uBACE,uDAGJ,oBAkBE,mDAhBA,+CAaA,CAbA,oBAaA,0FAEE,CAFF,gGAbA,+BAaA,0BAGA,mQAIA,oNAEE,kCADA,gBACA,aAGJ,sDAHI,mBAGJ,yBAYI,+VACE,sDAGA,iBAHA,2BAGA,kWAGN,iDAEE,CALI,gGAGN,CAHM,gBAKJ,yCAGF,0EACE,2EAGF,iBACE,yDAOA,0EAGF,6EAEE,iBC/EA,wDACA,4DACA,qBAEA,oDCDA,6BACA,yBACA,sBAEA,iBAGF,sNAYE,iBAEA,kBAdF,wRA8BI,kBACA,iOAkBA,aACA,4DACE,uEAEA,uVAoBA,iDAKA,ieC1EJ,4BACA,CCFF,6JAEE,iDACA,sEAIA,mDAGA,iDAOF,4DAGE,8CAEA,CAEA,kBACA,CAHA,gCAEA,CACA,eADA,cACA,oBAEE,uBAFF,kCAEE,gCAEF,kBACE,CAIA,mDAEA,CAHA,uCACA,CALF,aACE,6BAEA,CAIA,gBAJA,mCACA,CADA,gBAIA,wBACA,6CAGF,YAHE,iBAGF,gCAGA,iEACA,6CAEA,qDACA,6EACA,2EACA,8GAEA,yCAGA,uBACA,CAFA,yBACA,CACA,yDAKA,kDACE,mFAKJ,oCACE,CANE,aAKJ,CACE,qEAIA,YAFA,WAEA,CAHA,aACA,CAEA,gBACE,4BACA,sBADA,aACA,gCAMF,oCACA,yDACA,2CAEA,qBAGE,kBAEA,CACA,mCAIF,CARE,YACA,CAOF,iCAEE,CAPA,oBACA,CAQA,oBACE,uDAEJ,sDAGA,CAHA,cAGA,0BACE,oDAIA,oCACA,4BACA,sBAGA,cAEA,oFAGA,sBAEA,yDACE,CAIA,iBAJA,wBAIA,6CAJA,6CAOA,4BAGJ,CAHI,cAGJ,yCAGA,kBACE,CAIA,iDAEA,CATA,YAEF,CACE,4CAGA,kBAIA,wEAEA,wDAIF,kCAOE,i
DACA,CARF,WAIE,sCAGA,CANA,2CACA,CAMA,oEARF,iBACE,CACA,qCAMA,iBAuBE,uBAlBF,YAKA,2DALA,uDAKA,CALA,sBAiBA,4CACE,CALA,gRAIF,YACE,UAEN,uBACE,YACA,mCAOE,+CAGA,8BAGF,+CAGA,4BCjNA,SDiNA,qFCjNA,gDAGA,sCACA,qCACA,sDAIF,CAIE,kDAGA,CAPF,0CAOE,kBAEA,kDAEA,CAHA,eACA,CAFA,YACA,CADA,SAIA,mHAIE,CAGA,6CAFA,oCAeE,CAbF,yBACE,qBAEJ,CAGE,oBACA,CAEA,YAFA,2CACF,CACE,uBAEA,mFAEE,CALJ,oBACE,CAEA,UAEE,gCAGF,sDAEA,yCC7CJ,oCAGA,CD6CE,yXAQE,sCCrDJ,wCAGA,oCACE","sources":["webpack:///./node_modules/normalize.css/normalize.css","webpack:///./src/furo/assets/styles/base/_print.sass","webpack:///./src/furo/assets/styles/base/_screen-readers.sass","webpack:///./src/furo/assets/styles/base/_theme.sass","webpack:///./src/furo/assets/styles/variables/_fonts.scss","webpack:///./src/furo/assets/styles/variables/_spacing.scss","webpack:///./src/furo/assets/styles/variables/_icons.scss","webpack:///./src/furo/assets/styles/variables/_admonitions.scss","webpack:///./src/furo/assets/styles/variables/_colors.scss","webpack:///./src/furo/assets/styles/base/_typography.sass","webpack:///./src/furo/assets/styles/_scaffold.sass","webpack:///./src/furo/assets/styles/variables/_layout.scss","webpack:///./src/furo/assets/styles/content/_admonitions.sass","webpack:///./src/furo/assets/styles/content/_api.sass","webpack:///./src/furo/assets/styles/content/_blocks.sass","webpack:///./src/furo/assets/styles/content/_captions.sass","webpack:///./src/furo/assets/styles/content/_code.sass","webpack:///./src/furo/assets/styles/content/_footnotes.sass","webpack:///./src/furo/assets/styles/content/_images.sass","webpack:///./src/furo/assets/styles/content/_indexes.sass","webpack:///./src/furo/assets/styles/content/_lists.sass","webpack:///./src/furo/assets/styles/content/_math.sass","webpack:///./src/furo/assets/styles/content/_misc.sass","webpack:///./src/furo/assets/styles/content/_rubrics.sass","webpack:///./src/furo/assets/styles/content/_sidebar.sass","webpack:///./src/furo/assets/styles/content/_tables.sass","webpack:///./src/furo/assets/styles/content/_target.sass","webpack:///./src/furo/assets/styles/content/_gui-labels.sass","webpack:///./src/furo/assets/styles/components/_footer.sass","webpack:///./src/furo/assets/styles/components/_search.sass","webpack:///./src/furo/assets/styles/components/_sidebar.sass","webpack:///./src/furo/assets/styles/components/_table_of_contents.sass","webpack:///./src/furo/assets/styles/_shame.sass"],"sourcesContent":["/*! normalize.css v8.0.1 | MIT License | github.com/necolas/normalize.css */\n\n/* Document\n ========================================================================== */\n\n/**\n * 1. Correct the line height in all browsers.\n * 2. Prevent adjustments of font size after orientation changes in iOS.\n */\n\nhtml {\n line-height: 1.15; /* 1 */\n -webkit-text-size-adjust: 100%; /* 2 */\n}\n\n/* Sections\n ========================================================================== */\n\n/**\n * Remove the margin in all browsers.\n */\n\nbody {\n margin: 0;\n}\n\n/**\n * Render the `main` element consistently in IE.\n */\n\nmain {\n display: block;\n}\n\n/**\n * Correct the font size and margin on `h1` elements within `section` and\n * `article` contexts in Chrome, Firefox, and Safari.\n */\n\nh1 {\n font-size: 2em;\n margin: 0.67em 0;\n}\n\n/* Grouping content\n ========================================================================== */\n\n/**\n * 1. Add the correct box sizing in Firefox.\n * 2. 
Show the overflow in Edge and IE.\n */\n\nhr {\n box-sizing: content-box; /* 1 */\n height: 0; /* 1 */\n overflow: visible; /* 2 */\n}\n\n/**\n * 1. Correct the inheritance and scaling of font size in all browsers.\n * 2. Correct the odd `em` font sizing in all browsers.\n */\n\npre {\n font-family: monospace, monospace; /* 1 */\n font-size: 1em; /* 2 */\n}\n\n/* Text-level semantics\n ========================================================================== */\n\n/**\n * Remove the gray background on active links in IE 10.\n */\n\na {\n background-color: transparent;\n}\n\n/**\n * 1. Remove the bottom border in Chrome 57-\n * 2. Add the correct text decoration in Chrome, Edge, IE, Opera, and Safari.\n */\n\nabbr[title] {\n border-bottom: none; /* 1 */\n text-decoration: underline; /* 2 */\n text-decoration: underline dotted; /* 2 */\n}\n\n/**\n * Add the correct font weight in Chrome, Edge, and Safari.\n */\n\nb,\nstrong {\n font-weight: bolder;\n}\n\n/**\n * 1. Correct the inheritance and scaling of font size in all browsers.\n * 2. Correct the odd `em` font sizing in all browsers.\n */\n\ncode,\nkbd,\nsamp {\n font-family: monospace, monospace; /* 1 */\n font-size: 1em; /* 2 */\n}\n\n/**\n * Add the correct font size in all browsers.\n */\n\nsmall {\n font-size: 80%;\n}\n\n/**\n * Prevent `sub` and `sup` elements from affecting the line height in\n * all browsers.\n */\n\nsub,\nsup {\n font-size: 75%;\n line-height: 0;\n position: relative;\n vertical-align: baseline;\n}\n\nsub {\n bottom: -0.25em;\n}\n\nsup {\n top: -0.5em;\n}\n\n/* Embedded content\n ========================================================================== */\n\n/**\n * Remove the border on images inside links in IE 10.\n */\n\nimg {\n border-style: none;\n}\n\n/* Forms\n ========================================================================== */\n\n/**\n * 1. Change the font styles in all browsers.\n * 2. Remove the margin in Firefox and Safari.\n */\n\nbutton,\ninput,\noptgroup,\nselect,\ntextarea {\n font-family: inherit; /* 1 */\n font-size: 100%; /* 1 */\n line-height: 1.15; /* 1 */\n margin: 0; /* 2 */\n}\n\n/**\n * Show the overflow in IE.\n * 1. Show the overflow in Edge.\n */\n\nbutton,\ninput { /* 1 */\n overflow: visible;\n}\n\n/**\n * Remove the inheritance of text transform in Edge, Firefox, and IE.\n * 1. Remove the inheritance of text transform in Firefox.\n */\n\nbutton,\nselect { /* 1 */\n text-transform: none;\n}\n\n/**\n * Correct the inability to style clickable types in iOS and Safari.\n */\n\nbutton,\n[type=\"button\"],\n[type=\"reset\"],\n[type=\"submit\"] {\n -webkit-appearance: button;\n}\n\n/**\n * Remove the inner border and padding in Firefox.\n */\n\nbutton::-moz-focus-inner,\n[type=\"button\"]::-moz-focus-inner,\n[type=\"reset\"]::-moz-focus-inner,\n[type=\"submit\"]::-moz-focus-inner {\n border-style: none;\n padding: 0;\n}\n\n/**\n * Restore the focus styles unset by the previous rule.\n */\n\nbutton:-moz-focusring,\n[type=\"button\"]:-moz-focusring,\n[type=\"reset\"]:-moz-focusring,\n[type=\"submit\"]:-moz-focusring {\n outline: 1px dotted ButtonText;\n}\n\n/**\n * Correct the padding in Firefox.\n */\n\nfieldset {\n padding: 0.35em 0.75em 0.625em;\n}\n\n/**\n * 1. Correct the text wrapping in Edge and IE.\n * 2. Correct the color inheritance from `fieldset` elements in IE.\n * 3. 
Remove the padding so developers are not caught out when they zero out\n * `fieldset` elements in all browsers.\n */\n\nlegend {\n box-sizing: border-box; /* 1 */\n color: inherit; /* 2 */\n display: table; /* 1 */\n max-width: 100%; /* 1 */\n padding: 0; /* 3 */\n white-space: normal; /* 1 */\n}\n\n/**\n * Add the correct vertical alignment in Chrome, Firefox, and Opera.\n */\n\nprogress {\n vertical-align: baseline;\n}\n\n/**\n * Remove the default vertical scrollbar in IE 10+.\n */\n\ntextarea {\n overflow: auto;\n}\n\n/**\n * 1. Add the correct box sizing in IE 10.\n * 2. Remove the padding in IE 10.\n */\n\n[type=\"checkbox\"],\n[type=\"radio\"] {\n box-sizing: border-box; /* 1 */\n padding: 0; /* 2 */\n}\n\n/**\n * Correct the cursor style of increment and decrement buttons in Chrome.\n */\n\n[type=\"number\"]::-webkit-inner-spin-button,\n[type=\"number\"]::-webkit-outer-spin-button {\n height: auto;\n}\n\n/**\n * 1. Correct the odd appearance in Chrome and Safari.\n * 2. Correct the outline style in Safari.\n */\n\n[type=\"search\"] {\n -webkit-appearance: textfield; /* 1 */\n outline-offset: -2px; /* 2 */\n}\n\n/**\n * Remove the inner padding in Chrome and Safari on macOS.\n */\n\n[type=\"search\"]::-webkit-search-decoration {\n -webkit-appearance: none;\n}\n\n/**\n * 1. Correct the inability to style clickable types in iOS and Safari.\n * 2. Change font properties to `inherit` in Safari.\n */\n\n::-webkit-file-upload-button {\n -webkit-appearance: button; /* 1 */\n font: inherit; /* 2 */\n}\n\n/* Interactive\n ========================================================================== */\n\n/*\n * Add the correct display in Edge, IE 10+, and Firefox.\n */\n\ndetails {\n display: block;\n}\n\n/*\n * Add the correct display in all browsers.\n */\n\nsummary {\n display: list-item;\n}\n\n/* Misc\n ========================================================================== */\n\n/**\n * Add the correct display in IE 10+.\n */\n\ntemplate {\n display: none;\n}\n\n/**\n * Add the correct display in IE 10.\n */\n\n[hidden] {\n display: none;\n}\n","// This file contains styles for managing print media.\n\n////////////////////////////////////////////////////////////////////////////////\n// Hide elements not relevant to print media.\n////////////////////////////////////////////////////////////////////////////////\n@media print\n // Hide icon container.\n .content-icon-container\n display: none !important\n\n // Hide showing header links if hovering over when printing.\n .headerlink\n display: none !important\n\n // Hide mobile header.\n .mobile-header\n display: none !important\n\n // Hide navigation links.\n .related-pages\n display: none !important\n\n////////////////////////////////////////////////////////////////////////////////\n// Tweaks related to decolorization.\n////////////////////////////////////////////////////////////////////////////////\n@media print\n // Apply a border around code which no longer have a color background.\n .highlight\n border: 0.1pt solid var(--color-foreground-border)\n\n////////////////////////////////////////////////////////////////////////////////\n// Avoid page break in some relevant cases.\n////////////////////////////////////////////////////////////////////////////////\n@media print\n ul, ol, dl, a, table, pre, blockquote\n page-break-inside: avoid\n\n h1, h2, h3, h4, h5, h6, img, figure, caption\n page-break-inside: avoid\n page-break-after: avoid\n\n ul, ol, dl\n page-break-before: avoid\n",".visually-hidden\n position: absolute !important\n width: 
1px !important\n height: 1px !important\n padding: 0 !important\n margin: -1px !important\n overflow: hidden !important\n clip: rect(0,0,0,0) !important\n white-space: nowrap !important\n border: 0 !important\n\n:-moz-focusring\n outline: auto\n","// This file serves as the \"skeleton\" of the theming logic.\n//\n// This contains the bulk of the logic for handling dark mode, color scheme\n// toggling and the handling of color-scheme-specific hiding of elements.\n\nbody\n @include fonts\n @include spacing\n @include icons\n @include admonitions\n @include default-admonition(#651fff, \"abstract\")\n @include default-topic(#14B8A6, \"pencil\")\n\n @include colors\n\n.only-light\n display: block !important\nhtml body .only-dark\n display: none !important\n\n// Ignore dark-mode hints if print media.\n@media not print\n // Enable dark-mode, if requested.\n body[data-theme=\"dark\"]\n @include colors-dark\n\n html & .only-light\n display: none !important\n .only-dark\n display: block !important\n\n // Enable dark mode, unless explicitly told to avoid.\n @media (prefers-color-scheme: dark)\n body:not([data-theme=\"light\"])\n @include colors-dark\n\n html & .only-light\n display: none !important\n .only-dark\n display: block !important\n\n//\n// Theme toggle presentation\n//\nbody[data-theme=\"auto\"]\n .theme-toggle svg.theme-icon-when-auto\n display: block\n\nbody[data-theme=\"dark\"]\n .theme-toggle svg.theme-icon-when-dark\n display: block\n\nbody[data-theme=\"light\"]\n .theme-toggle svg.theme-icon-when-light\n display: block\n","// Fonts used by this theme.\n//\n// There are basically two things here -- using the system font stack and\n// defining sizes for various elements in %ages. We could have also used `em`\n// but %age is easier to reason about for me.\n\n@mixin fonts {\n // These are adapted from https://systemfontstack.com/\n --font-stack: -apple-system, BlinkMacSystemFont, Segoe UI, Helvetica, Arial,\n sans-serif, Apple Color Emoji, Segoe UI Emoji;\n --font-stack--monospace: \"SFMono-Regular\", Menlo, Consolas, Monaco,\n Liberation Mono, Lucida Console, monospace;\n\n --font-size--normal: 100%;\n --font-size--small: 87.5%;\n --font-size--small--2: 81.25%;\n --font-size--small--3: 75%;\n --font-size--small--4: 62.5%;\n\n // Sidebar\n --sidebar-caption-font-size: var(--font-size--small--2);\n --sidebar-item-font-size: var(--font-size--small);\n --sidebar-search-input-font-size: var(--font-size--small);\n\n // Table of Contents\n --toc-font-size: var(--font-size--small--3);\n --toc-font-size--mobile: var(--font-size--normal);\n --toc-title-font-size: var(--font-size--small--4);\n\n // Admonitions\n //\n // These aren't defined in terms of %ages, since nesting these is permitted.\n --admonition-font-size: 0.8125rem;\n --admonition-title-font-size: 0.8125rem;\n\n // Code\n --code-font-size: var(--font-size--small--2);\n\n // API\n --api-font-size: var(--font-size--small);\n}\n","// Spacing for various elements on the page\n//\n// If the user wants to tweak things in a certain way, they are permitted to.\n// They also have to deal with the consequences though!\n\n@mixin spacing {\n // Header!\n --header-height: calc(\n var(--sidebar-item-line-height) + 4 * #{var(--sidebar-item-spacing-vertical)}\n );\n --header-padding: 0.5rem;\n\n // Sidebar\n --sidebar-tree-space-above: 1.5rem;\n --sidebar-caption-space-above: 1rem;\n\n --sidebar-item-line-height: 1rem;\n --sidebar-item-spacing-vertical: 0.5rem;\n --sidebar-item-spacing-horizontal: 1rem;\n --sidebar-item-height: calc(\n 
var(--sidebar-item-line-height) + 2 *#{var(--sidebar-item-spacing-vertical)}\n );\n\n --sidebar-expander-width: var(--sidebar-item-height); // be square\n\n --sidebar-search-space-above: 0.5rem;\n --sidebar-search-input-spacing-vertical: 0.5rem;\n --sidebar-search-input-spacing-horizontal: 0.5rem;\n --sidebar-search-input-height: 1rem;\n --sidebar-search-icon-size: var(--sidebar-search-input-height);\n\n // Table of Contents\n --toc-title-padding: 0.25rem 0;\n --toc-spacing-vertical: 1.5rem;\n --toc-spacing-horizontal: 1.5rem;\n --toc-item-spacing-vertical: 0.4rem;\n --toc-item-spacing-horizontal: 1rem;\n}\n","// Expose theme icons as CSS variables.\n\n$icons: (\n // Adapted from tabler-icons\n // url: https://tablericons.com/\n \"search\":\n url('data:image/svg+xml;charset=utf-8,'),\n // Factored out from mkdocs-material on 24-Aug-2020.\n // url: https://squidfunk.github.io/mkdocs-material/reference/admonitions/\n \"pencil\":\n url('data:image/svg+xml;charset=utf-8,'),\n \"abstract\":\n url('data:image/svg+xml;charset=utf-8,'),\n \"info\":\n url('data:image/svg+xml;charset=utf-8,'),\n \"flame\":\n url('data:image/svg+xml;charset=utf-8,'),\n \"question\":\n url('data:image/svg+xml;charset=utf-8,'),\n \"warning\":\n url('data:image/svg+xml;charset=utf-8,'),\n \"failure\":\n url('data:image/svg+xml;charset=utf-8,'),\n \"spark\":\n url('data:image/svg+xml;charset=utf-8,')\n);\n\n@mixin icons {\n @each $name, $glyph in $icons {\n --icon-#{$name}: #{$glyph};\n }\n}\n","// Admonitions\n\n// Structure of these is:\n// admonition-class: color \"icon-name\";\n//\n// The colors are translated into CSS variables below. The icons are\n// used directly in the main declarations to set the `mask-image` in\n// the title.\n\n// prettier-ignore\n$admonitions: (\n // Each of these has an reST directives for it.\n \"caution\": #ff9100 \"spark\",\n \"warning\": #ff9100 \"warning\",\n \"danger\": #ff5252 \"spark\",\n \"attention\": #ff5252 \"warning\",\n \"error\": #ff5252 \"failure\",\n \"hint\": #00c852 \"question\",\n \"tip\": #00c852 \"info\",\n \"important\": #00bfa5 \"flame\",\n \"note\": #00b0ff \"pencil\",\n \"seealso\": #448aff \"info\",\n \"admonition-todo\": #808080 \"pencil\"\n);\n\n@mixin default-admonition($color, $icon-name) {\n --color-admonition-title: #{$color};\n --color-admonition-title-background: #{rgba($color, 0.2)};\n\n --icon-admonition-default: var(--icon-#{$icon-name});\n}\n\n@mixin default-topic($color, $icon-name) {\n --color-topic-title: #{$color};\n --color-topic-title-background: #{rgba($color, 0.2)};\n\n --icon-topic-default: var(--icon-#{$icon-name});\n}\n\n@mixin admonitions {\n @each $name, $values in $admonitions {\n --color-admonition-title--#{$name}: #{nth($values, 1)};\n --color-admonition-title-background--#{$name}: #{rgba(\n nth($values, 1),\n 0.2\n )};\n }\n}\n","// Colors used throughout this theme.\n//\n// The aim is to give the user more control. 
Thus, instead of hard-coding colors\n// in various parts of the stylesheet, the approach taken is to define all\n// colors as CSS variables and reusing them in all the places.\n//\n// `colors-dark` depends on `colors` being included at a lower specificity.\n\n@mixin colors {\n --color-problematic: #b30000;\n\n // Base Colors\n --color-foreground-primary: black; // for main text and headings\n --color-foreground-secondary: #5a5c63; // for secondary text\n --color-foreground-muted: #646776; // for muted text\n --color-foreground-border: #878787; // for content borders\n\n --color-background-primary: white; // for content\n --color-background-secondary: #f8f9fb; // for navigation + ToC\n --color-background-hover: #efeff4ff; // for navigation-item hover\n --color-background-hover--transparent: #efeff400;\n --color-background-border: #eeebee; // for UI borders\n --color-background-item: #ccc; // for \"background\" items (eg: copybutton)\n\n // Announcements\n --color-announcement-background: #000000dd;\n --color-announcement-text: #eeebee;\n\n // Brand colors\n --color-brand-primary: #2962ff;\n --color-brand-content: #2a5adf;\n\n // API documentation\n --color-api-background: var(--color-background-hover--transparent);\n --color-api-background-hover: var(--color-background-hover);\n --color-api-overall: var(--color-foreground-secondary);\n --color-api-name: var(--color-problematic);\n --color-api-pre-name: var(--color-problematic);\n --color-api-paren: var(--color-foreground-secondary);\n --color-api-keyword: var(--color-foreground-primary);\n --color-highlight-on-target: #ffffcc;\n\n // Inline code background\n --color-inline-code-background: var(--color-background-secondary);\n\n // Highlighted text (search)\n --color-highlighted-background: #ddeeff;\n --color-highlighted-text: var(--color-foreground-primary);\n\n // GUI Labels\n --color-guilabel-background: #ddeeff80;\n --color-guilabel-border: #bedaf580;\n --color-guilabel-text: var(--color-foreground-primary);\n\n // Admonitions!\n --color-admonition-background: transparent;\n\n //////////////////////////////////////////////////////////////////////////////\n // Everything below this should be one of:\n // - var(...)\n // - *-gradient(...)\n // - special literal values (eg: transparent, none)\n //////////////////////////////////////////////////////////////////////////////\n\n // Tables\n --color-table-header-background: var(--color-background-secondary);\n --color-table-border: var(--color-background-border);\n\n // Cards\n --color-card-border: var(--color-background-secondary);\n --color-card-background: transparent;\n --color-card-marginals-background: var(--color-background-secondary);\n\n // Header\n --color-header-background: var(--color-background-primary);\n --color-header-border: var(--color-background-border);\n --color-header-text: var(--color-foreground-primary);\n\n // Sidebar (left)\n --color-sidebar-background: var(--color-background-secondary);\n --color-sidebar-background-border: var(--color-background-border);\n\n --color-sidebar-brand-text: var(--color-foreground-primary);\n --color-sidebar-caption-text: var(--color-foreground-muted);\n --color-sidebar-link-text: var(--color-foreground-secondary);\n --color-sidebar-link-text--top-level: var(--color-brand-primary);\n\n --color-sidebar-item-background: var(--color-sidebar-background);\n --color-sidebar-item-background--current: var(\n --color-sidebar-item-background\n );\n --color-sidebar-item-background--hover: linear-gradient(\n 90deg,\n 
var(--color-background-hover--transparent) 0%,\n var(--color-background-hover) var(--sidebar-item-spacing-horizontal),\n var(--color-background-hover) 100%\n );\n\n --color-sidebar-item-expander-background: transparent;\n --color-sidebar-item-expander-background--hover: var(\n --color-background-hover\n );\n\n --color-sidebar-search-text: var(--color-foreground-primary);\n --color-sidebar-search-background: var(--color-background-secondary);\n --color-sidebar-search-background--focus: var(--color-background-primary);\n --color-sidebar-search-border: var(--color-background-border);\n --color-sidebar-search-icon: var(--color-foreground-muted);\n\n // Table of Contents (right)\n --color-toc-background: var(--color-background-primary);\n --color-toc-title-text: var(--color-foreground-muted);\n --color-toc-item-text: var(--color-foreground-secondary);\n --color-toc-item-text--hover: var(--color-foreground-primary);\n --color-toc-item-text--active: var(--color-brand-primary);\n\n // Actual page contents\n --color-content-foreground: var(--color-foreground-primary);\n --color-content-background: transparent;\n\n // Links\n --color-link: var(--color-brand-content);\n --color-link--hover: var(--color-brand-content);\n --color-link-underline: var(--color-background-border);\n --color-link-underline--hover: var(--color-foreground-border);\n}\n\n@mixin colors-dark {\n --color-problematic: #ee5151;\n\n // Base Colors\n --color-foreground-primary: #ffffffcc; // for main text and headings\n --color-foreground-secondary: #9ca0a5; // for secondary text\n --color-foreground-muted: #81868d; // for muted text\n --color-foreground-border: #666666; // for content borders\n\n --color-background-primary: #131416; // for content\n --color-background-secondary: #1a1c1e; // for navigation + ToC\n --color-background-hover: #1e2124ff; // for navigation-item hover\n --color-background-hover--transparent: #1e212400;\n --color-background-border: #303335; // for UI borders\n --color-background-item: #444; // for \"background\" items (eg: copybutton)\n\n // Announcements\n --color-announcement-background: #000000dd;\n --color-announcement-text: #eeebee;\n\n // Brand colors\n --color-brand-primary: #2b8cee;\n --color-brand-content: #368ce2;\n\n // Highlighted text (search)\n --color-highlighted-background: #083563;\n\n // GUI Labels\n --color-guilabel-background: #08356380;\n --color-guilabel-border: #13395f80;\n\n // API documentation\n --color-api-keyword: var(--color-foreground-secondary);\n --color-highlight-on-target: #333300;\n\n // Admonitions\n --color-admonition-background: #18181a;\n\n // Cards\n --color-card-border: var(--color-background-secondary);\n --color-card-background: #18181a;\n --color-card-marginals-background: var(--color-background-hover);\n}\n","// This file contains the styling for making the content throughout the page,\n// including fonts, paragraphs, headings and spacing among these elements.\n\nbody\n font-family: var(--font-stack)\npre,\ncode,\nkbd,\nsamp\n font-family: var(--font-stack--monospace)\n\n// Make fonts look slightly nicer.\nbody\n -webkit-font-smoothing: antialiased\n -moz-osx-font-smoothing: grayscale\n\n// Line height from Bootstrap 4.1\narticle\n line-height: 1.5\n\n//\n// Headings\n//\nh1,\nh2,\nh3,\nh4,\nh5,\nh6\n line-height: 1.25\n font-weight: bold\n\n border-radius: 0.5rem\n margin-top: 0.5rem\n margin-bottom: 0.5rem\n margin-left: -0.5rem\n margin-right: -0.5rem\n padding-left: 0.5rem\n padding-right: 0.5rem\n\n + p\n margin-top: 0\n\nh1\n font-size: 2.5em\n margin-top: 
1.75rem\n margin-bottom: 1rem\nh2\n font-size: 2em\n margin-top: 1.75rem\nh3\n font-size: 1.5em\nh4\n font-size: 1.25em\nh5\n font-size: 1.125em\nh6\n font-size: 1em\n\nsmall\n opacity: 75%\n font-size: 80%\n\n// Paragraph\np\n margin-top: 0.5rem\n margin-bottom: 0.75rem\n\n// Horizontal rules\nhr.docutils\n height: 1px\n padding: 0\n margin: 2rem 0\n background-color: var(--color-background-border)\n border: 0\n\n.centered\n text-align: center\n\n// Links\na\n text-decoration: underline\n\n color: var(--color-link)\n text-decoration-color: var(--color-link-underline)\n\n &:hover\n color: var(--color-link--hover)\n text-decoration-color: var(--color-link-underline--hover)\n &.muted-link\n color: inherit\n &:hover\n color: var(--color-link)\n text-decoration-color: var(--color-link-underline--hover)\n","// This file contains the styles for the overall layouting of the documentation\n// skeleton, including the responsive changes as well as sidebar toggles.\n//\n// This is implemented as a mobile-last design, which isn't ideal, but it is\n// reasonably good-enough and I got pretty tired by the time I'd finished this\n// to move the rules around to fix this. Shouldn't take more than 3-4 hours,\n// if you know what you're doing tho.\n\n// HACK: Not all browsers account for the scrollbar width in media queries.\n// This results in horizontal scrollbars in the breakpoint where we go\n// from displaying everything to hiding the ToC. We accomodate for this by\n// adding a bit of padding to the TOC drawer, disabling the horizontal\n// scrollbar and allowing the scrollbars to cover the padding.\n// https://www.456bereastreet.com/archive/201301/media_query_width_and_vertical_scrollbars/\n\n// HACK: Always having the scrollbar visible, prevents certain browsers from\n// causing the content to stutter horizontally between taller-than-viewport and\n// not-taller-than-viewport pages.\n\nhtml\n overflow-x: hidden\n overflow-y: scroll\n scroll-behavior: smooth\n\n.sidebar-scroll, .toc-scroll, article[role=main] *\n // Override Firefox scrollbar style\n scrollbar-width: thin\n scrollbar-color: var(--color-foreground-border) transparent\n\n // Override Chrome scrollbar styles\n &::-webkit-scrollbar\n width: 0.25rem\n height: 0.25rem\n &::-webkit-scrollbar-thumb\n background-color: var(--color-foreground-border)\n border-radius: 0.125rem\n\n//\n// Overalls\n//\nhtml,\nbody\n height: 100%\n color: var(--color-foreground-primary)\n background: var(--color-background-primary)\n\narticle\n color: var(--color-content-foreground)\n background: var(--color-content-background)\n\n.page\n display: flex\n // fill the viewport for pages with little content.\n min-height: 100%\n\n.mobile-header\n width: 100%\n height: var(--header-height)\n background-color: var(--color-header-background)\n color: var(--color-header-text)\n border-bottom: 1px solid var(--color-header-border)\n\n // Looks like sub-script/super-script have this, and we need this to\n // be \"on top\" of those.\n z-index: 10\n\n // We don't show the header on large screens.\n display: none\n\n // Add shadow when scrolled\n &.scrolled\n border-bottom: none\n box-shadow: 0 0 0.2rem rgba(0, 0, 0, 0.1), 0 0.2rem 0.4rem rgba(0, 0, 0, 0.2)\n\n .header-center\n a\n color: var(--color-header-text)\n text-decoration: none\n\n.main\n display: flex\n flex: 1\n\n// Sidebar (left) also covers the entire left portion of screen.\n.sidebar-drawer\n box-sizing: border-box\n\n border-right: 1px solid var(--color-sidebar-background-border)\n background: 
var(--color-sidebar-background)\n\n display: flex\n justify-content: flex-end\n // These next two lines took me two days to figure out.\n width: calc((100% - #{$full-width}) / 2 + #{$sidebar-width})\n min-width: $sidebar-width\n\n// Scroll-along sidebars\n.sidebar-container,\n.toc-drawer\n box-sizing: border-box\n width: $sidebar-width\n\n.toc-drawer\n background: var(--color-toc-background)\n // See HACK described on top of this document\n padding-right: 1rem\n\n.sidebar-sticky,\n.toc-sticky\n position: sticky\n top: 0\n height: min(100%, 100vh)\n height: 100vh\n\n display: flex\n flex-direction: column\n\n.sidebar-scroll,\n.toc-scroll\n flex-grow: 1\n flex-shrink: 1\n\n overflow: auto\n scroll-behavior: smooth\n\n// Central items.\n.content\n padding: 0 $content-padding\n width: $content-width\n\n display: flex\n flex-direction: column\n justify-content: space-between\n\n.icon\n display: inline-block\n height: 1rem\n width: 1rem\n svg\n width: 100%\n height: 100%\n\n//\n// Accommodate announcement banner\n//\n.announcement\n background-color: var(--color-announcement-background)\n color: var(--color-announcement-text)\n\n height: var(--header-height)\n display: flex\n align-items: center\n overflow-x: auto\n & + .page\n min-height: calc(100% - var(--header-height))\n\n.announcement-content\n box-sizing: border-box\n padding: 0.5rem\n min-width: 100%\n white-space: nowrap\n text-align: center\n\n a\n color: var(--color-announcement-text)\n text-decoration-color: var(--color-announcement-text)\n\n &:hover\n color: var(--color-announcement-text)\n text-decoration-color: var(--color-link--hover)\n\n////////////////////////////////////////////////////////////////////////////////\n// Toggles for theme\n////////////////////////////////////////////////////////////////////////////////\n.no-js .theme-toggle-container // don't show theme toggle if there's no JS\n display: none\n\n.theme-toggle-container\n vertical-align: middle\n\n.theme-toggle\n cursor: pointer\n border: none\n padding: 0\n background: transparent\n\n.theme-toggle svg\n vertical-align: middle\n height: 1rem\n width: 1rem\n color: var(--color-foreground-primary)\n display: none\n\n.theme-toggle-header\n float: left\n padding: 1rem 0.5rem\n\n////////////////////////////////////////////////////////////////////////////////\n// Toggles for elements\n////////////////////////////////////////////////////////////////////////////////\n.toc-overlay-icon, .nav-overlay-icon\n display: none\n cursor: pointer\n\n .icon\n color: var(--color-foreground-secondary)\n height: 1rem\n width: 1rem\n\n.toc-header-icon, .nav-overlay-icon\n // for when we set display: flex\n justify-content: center\n align-items: center\n\n.toc-content-icon\n height: 1.5rem\n width: 1.5rem\n\n.content-icon-container\n float: right\n display: flex\n margin-top: 1.5rem\n margin-left: 1rem\n margin-bottom: 1rem\n gap: 0.5rem\n\n .edit-this-page svg\n color: inherit\n height: 1rem\n width: 1rem\n\n.sidebar-toggle\n position: absolute\n display: none\n// \n.sidebar-toggle[name=\"__toc\"]\n left: 20px\n.sidebar-toggle:checked\n left: 40px\n// \n\n.overlay\n position: fixed\n top: 0\n width: 0\n height: 0\n\n transition: width 0ms, height 0ms, opacity 250ms ease-out\n\n opacity: 0\n background-color: rgba(0, 0, 0, 0.54)\n.sidebar-overlay\n z-index: 20\n.toc-overlay\n z-index: 40\n\n// Keep things on top and smooth.\n.sidebar-drawer\n z-index: 30\n transition: left 250ms ease-in-out\n.toc-drawer\n z-index: 50\n transition: right 250ms ease-in-out\n\n// Show the 
Sidebar\n#__navigation:checked\n & ~ .sidebar-overlay\n width: 100%\n height: 100%\n opacity: 1\n & ~ .page\n .sidebar-drawer\n top: 0\n left: 0\n // Show the toc sidebar\n#__toc:checked\n & ~ .toc-overlay\n width: 100%\n height: 100%\n opacity: 1\n & ~ .page\n .toc-drawer\n top: 0\n right: 0\n\n////////////////////////////////////////////////////////////////////////////////\n// Back to top\n////////////////////////////////////////////////////////////////////////////////\n.back-to-top\n text-decoration: none\n\n display: none\n position: fixed\n left: 0\n top: 1rem\n padding: 0.5rem\n padding-right: 0.75rem\n border-radius: 1rem\n font-size: 0.8125rem\n\n background: var(--color-background-primary)\n box-shadow: 0 0.2rem 0.5rem rgba(0, 0, 0, 0.05), #6b728080 0px 0px 1px 0px\n\n z-index: 10\n\n margin-left: 50%\n transform: translateX(-50%)\n svg\n height: 1rem\n width: 1rem\n fill: currentColor\n display: inline-block\n\n span\n margin-left: 0.25rem\n\n .show-back-to-top &\n display: flex\n align-items: center\n\n////////////////////////////////////////////////////////////////////////////////\n// Responsive layouting\n////////////////////////////////////////////////////////////////////////////////\n// Make things a bit bigger on bigger screens.\n@media (min-width: $full-width + $sidebar-width)\n html\n font-size: 110%\n\n@media (max-width: $full-width)\n // Collapse \"toc\" into the icon.\n .toc-content-icon\n display: flex\n .toc-drawer\n position: fixed\n height: 100vh\n top: 0\n right: -$sidebar-width\n border-left: 1px solid var(--color-background-muted)\n .toc-tree\n border-left: none\n font-size: var(--toc-font-size--mobile)\n\n // Accomodate for a changed content width.\n .sidebar-drawer\n width: calc((100% - #{$full-width - $sidebar-width}) / 2 + #{$sidebar-width})\n\n@media (max-width: $full-width - $sidebar-width)\n // Collapse \"navigation\".\n .nav-overlay-icon\n display: flex\n .sidebar-drawer\n position: fixed\n height: 100vh\n width: $sidebar-width\n\n top: 0\n left: -$sidebar-width\n\n // Swap which icon is visible.\n .toc-header-icon\n display: flex\n .toc-content-icon, .theme-toggle-content\n display: none\n .theme-toggle-header\n display: block\n\n // Show the header.\n .mobile-header\n position: sticky\n top: 0\n display: flex\n justify-content: space-between\n align-items: center\n\n .header-left,\n .header-right\n display: flex\n height: var(--header-height)\n padding: 0 var(--header-padding)\n label\n height: 100%\n width: 100%\n user-select: none\n\n .nav-overlay-icon .icon,\n .theme-toggle svg\n height: 1.25rem\n width: 1.25rem\n\n // Add a scroll margin for the content\n :target\n scroll-margin-top: var(--header-height)\n\n // Show back-to-top below the header\n .back-to-top\n top: calc(var(--header-height) + 0.5rem)\n\n // Center the page, and accommodate for the header.\n .page\n flex-direction: column\n justify-content: center\n .content\n margin-left: auto\n margin-right: auto\n\n@media (max-width: $content-width + 2* $content-padding)\n // Content should respect window limits.\n .content\n width: 100%\n overflow-x: auto\n\n@media (max-width: $content-width)\n .content\n padding: 0 $content-padding--small\n // Don't float sidebars to the right.\n article aside.sidebar\n float: none\n width: 100%\n margin: 1rem 0\n","// Overall Layout Variables\n//\n// Because CSS variables can't be used in media queries. 
The fact that this\n// makes the layout non-user-configurable is a good thing.\n$content-padding: 3em;\n$content-padding--small: 1em;\n$content-width: 46em;\n$sidebar-width: 15em;\n$full-width: $content-width + 2 * ($content-padding + $sidebar-width);\n","//\n// The design here is strongly inspired by mkdocs-material.\n.admonition, .topic\n margin: 1rem auto\n padding: 0 0.5rem 0.5rem 0.5rem\n\n background: var(--color-admonition-background)\n\n border-radius: 0.2rem\n box-shadow: 0 0.2rem 0.5rem rgba(0, 0, 0, 0.05), 0 0 0.0625rem rgba(0, 0, 0, 0.1)\n\n font-size: var(--admonition-font-size)\n\n overflow: hidden\n page-break-inside: avoid\n\n // First element should have no margin, since the title has it.\n > :nth-child(2)\n margin-top: 0\n\n // Last item should have no margin, since we'll control that w/ padding\n > :last-child\n margin-bottom: 0\n\np.admonition-title, p.topic-title\n position: relative\n margin: 0 -0.5rem 0.5rem\n padding-left: 2rem\n padding-right: .5rem\n padding-top: .4rem\n padding-bottom: .4rem\n\n font-weight: 500\n font-size: var(--admonition-title-font-size)\n line-height: 1.3\n\n // Our fancy icon\n &::before\n content: \"\"\n position: absolute\n left: 0.5rem\n width: 1rem\n height: 1rem\n\n// Default styles\np.admonition-title\n background-color: var(--color-admonition-title-background)\n &::before\n background-color: var(--color-admonition-title)\n mask-image: var(--icon-admonition-default)\n mask-repeat: no-repeat\n\np.topic-title\n background-color: var(--color-topic-title-background)\n &::before\n background-color: var(--color-topic-title)\n mask-image: var(--icon-topic-default)\n mask-repeat: no-repeat\n\n//\n// Variants\n//\n.admonition\n border-left: 0.2rem solid var(--color-admonition-title)\n\n @each $type, $value in $admonitions\n &.#{$type}\n border-left-color: var(--color-admonition-title--#{$type})\n > .admonition-title\n background-color: var(--color-admonition-title-background--#{$type})\n &::before\n background-color: var(--color-admonition-title--#{$type})\n mask-image: var(--icon-#{nth($value, 2)})\n\n.admonition-todo > .admonition-title\n text-transform: uppercase\n","// This file stylizes the API documentation (stuff generated by autodoc). 
It's\n// deeply nested due to how autodoc structures the HTML without enough classes\n// to select the relevant items.\n\n// API docs!\ndl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple)\n // Tweak the spacing of all the things!\n dd\n margin-left: 2rem\n > :first-child\n margin-top: 0.125rem\n > :last-child\n margin-bottom: 0.75rem\n\n // This is used for the arguments\n .field-list\n margin-bottom: 0.75rem\n\n // \"Headings\" (like \"Parameters\" and \"Return\")\n > dt\n text-transform: uppercase\n font-size: var(--font-size--small)\n\n dd:empty\n margin-bottom: 0.5rem\n dd > ul\n margin-left: -1.2rem\n > li\n > p:nth-child(2)\n margin-top: 0\n // When the last-empty-paragraph follows a paragraph, it doesn't need\n // to augument the existing spacing.\n > p + p:last-child:empty\n margin-top: 0\n margin-bottom: 0\n\n // Colorize the elements\n > dt\n color: var(--color-api-overall)\n\n.sig:not(.sig-inline)\n font-weight: bold\n\n font-size: var(--api-font-size)\n font-family: var(--font-stack--monospace)\n\n margin-left: -0.25rem\n margin-right: -0.25rem\n padding-top: 0.25rem\n padding-bottom: 0.25rem\n padding-right: 0.5rem\n\n // These are intentionally em, to properly match the font size.\n padding-left: 3em\n text-indent: -2.5em\n\n border-radius: 0.25rem\n\n background: var(--color-api-background)\n transition: background 100ms ease-out\n\n &:hover\n background: var(--color-api-background-hover)\n\n // adjust the size of the [source] link on the right.\n a.reference\n .viewcode-link\n font-weight: normal\n width: 3.5rem\n\n // Break words when they're too long\n span.pre\n overflow-wrap: anywhere\n\nem.property\n font-style: normal\n &:first-child\n color: var(--color-api-keyword)\n.sig-name\n color: var(--color-api-name)\n.sig-prename\n font-weight: normal\n color: var(--color-api-pre-name)\n.sig-paren\n color: var(--color-api-paren)\n.sig-param\n font-style: normal\n\n.versionmodified\n font-style: italic\ndiv.versionadded, div.versionchanged, div.deprecated\n p\n margin-top: 0.125rem\n margin-bottom: 0.125rem\n\n// Align the [docs] and [source] to the right.\n.viewcode-link, .viewcode-back\n float: right\n text-align: right\n",".line-block\n margin-top: 0.5rem\n margin-bottom: 0.75rem\n .line-block\n margin-top: 0rem\n margin-bottom: 0rem\n padding-left: 1rem\n","// Captions\narticle p.caption,\ntable > caption,\n.code-block-caption\n font-size: var(--font-size--small)\n text-align: center\n\n// Caption above a TOCTree\n.toctree-wrapper.compound\n .caption, :not(.caption) > .caption-text\n font-size: var(--font-size--small)\n text-transform: uppercase\n\n text-align: initial\n margin-bottom: 0\n\n > ul\n margin-top: 0\n margin-bottom: 0\n","// Inline code\ncode.literal, .sig-inline\n background: var(--color-inline-code-background)\n border-radius: 0.2em\n // Make the font smaller, and use padding to recover.\n font-size: var(--font-size--small--2)\n padding: 0.1em 0.2em\n\n overflow-wrap: break-word\n\n p &\n border: 1px solid var(--color-background-border)\n\n.sig-inline\n font-family: var(--font-stack--monospace)\n\n// Code and Literal Blocks\n$code-spacing-vertical: 0.625rem\n$code-spacing-horizontal: 0.875rem\n\n// Wraps every literal block + line numbers.\ndiv[class*=\" highlight-\"],\ndiv[class^=\"highlight-\"]\n margin: 1em 0\n display: flex\n\n .table-wrapper\n margin: 0\n padding: 0\n\npre\n margin: 0\n padding: 0\n overflow: auto\n\n // Needed to have more specificity than pygments' \"pre\" selector. 
:(\n article[role=\"main\"] .highlight &\n line-height: 1.5\n\n &.literal-block,\n .highlight &\n font-size: var(--code-font-size)\n padding: $code-spacing-vertical $code-spacing-horizontal\n\n // Make it look like all the other blocks.\n &.literal-block\n margin-top: 1rem\n margin-bottom: 1rem\n\n border-radius: 0.2rem\n background-color: var(--color-code-background)\n color: var(--color-code-foreground)\n\n// All code is always contained in this.\n.highlight\n width: 100%\n border-radius: 0.2rem\n\n // Make line numbers and prompts un-selectable.\n .gp, span.linenos\n user-select: none\n pointer-events: none\n\n // Expand the line-highlighting.\n .hll\n display: block\n margin-left: -$code-spacing-horizontal\n margin-right: -$code-spacing-horizontal\n padding-left: $code-spacing-horizontal\n padding-right: $code-spacing-horizontal\n\n/* Make code block captions be nicely integrated */\n.code-block-caption\n display: flex\n padding: $code-spacing-vertical $code-spacing-horizontal\n\n border-radius: 0.25rem\n border-bottom-left-radius: 0\n border-bottom-right-radius: 0\n font-weight: 300\n border-bottom: 1px solid\n\n background-color: var(--color-code-background)\n color: var(--color-code-foreground)\n border-color: var(--color-background-border)\n\n + div[class]\n margin-top: 0\n pre\n border-top-left-radius: 0\n border-top-right-radius: 0\n\n// When `html_codeblock_linenos_style` is table.\n.highlighttable\n width: 100%\n display: block\n tbody\n display: block\n\n tr\n display: flex\n\n // Line numbers\n td.linenos\n background-color: var(--color-code-background)\n color: var(--color-code-foreground)\n padding: $code-spacing-vertical $code-spacing-horizontal\n padding-right: 0\n border-top-left-radius: 0.2rem\n border-bottom-left-radius: 0.2rem\n\n .linenodiv\n padding-right: $code-spacing-horizontal\n font-size: var(--code-font-size)\n box-shadow: -0.0625rem 0 var(--color-foreground-border) inset\n\n // Actual code\n td.code\n padding: 0\n display: block\n flex: 1\n overflow: hidden\n\n .highlight\n border-top-left-radius: 0\n border-bottom-left-radius: 0\n\n// When `html_codeblock_linenos_style` is inline.\n.highlight\n span.linenos\n display: inline-block\n padding-left: 0\n padding-right: $code-spacing-horizontal\n margin-right: $code-spacing-horizontal\n box-shadow: -0.0625rem 0 var(--color-foreground-border) inset\n","// Inline Footnote Reference\n.footnote-reference\n font-size: var(--font-size--small--4)\n vertical-align: super\n\n// Definition list, listing the content of each note.\n// docutils <= 0.17\ndl.footnote.brackets\n font-size: var(--font-size--small)\n color: var(--color-foreground-secondary)\n\n display: grid\n grid-template-columns: max-content auto\n dt\n margin: 0\n > .fn-backref\n margin-left: 0.25rem\n\n &:after\n content: \":\"\n\n .brackets\n &:before\n content: \"[\"\n &:after\n content: \"]\"\n\n dd\n margin: 0\n padding: 0 1rem\n\n// docutils >= 0.18\naside.footnote\n font-size: var(--font-size--small)\n color: var(--color-foreground-secondary)\n\naside.footnote > span,\ndiv.citation > span\n float: left\n font-weight: 500\n padding-right: 0.25rem\n\naside.footnote > p,\ndiv.citation > p\n margin-left: 2rem\n","//\n// Figures\n//\nimg\n box-sizing: border-box\n max-width: 100%\n height: auto\n\narticle\n figure, .figure\n border-radius: 0.2rem\n\n margin: 0\n :last-child\n margin-bottom: 0\n\n .align-left\n float: left\n clear: left\n margin: 0 1rem 1rem\n\n .align-right\n float: right\n clear: right\n margin: 0 1rem 1rem\n\n .align-default,\n 
.align-center\n display: block\n text-align: center\n margin-left: auto\n margin-right: auto\n\n // WELL, table needs to be stylised like a table.\n table.align-default\n display: table\n text-align: initial\n",".genindex-jumpbox, .domainindex-jumpbox\n border-top: 1px solid var(--color-background-border)\n border-bottom: 1px solid var(--color-background-border)\n padding: 0.25rem\n\n.genindex-section, .domainindex-section\n h2\n margin-top: 0.75rem\n margin-bottom: 0.5rem\n ul\n margin-top: 0\n margin-bottom: 0\n","ul,\nol\n padding-left: 1.2rem\n\n // Space lists out like paragraphs\n margin-top: 1rem\n margin-bottom: 1rem\n // reduce margins within li.\n li\n > p:first-child\n margin-top: 0.25rem\n margin-bottom: 0.25rem\n\n > p:last-child\n margin-top: 0.25rem\n\n > ul,\n > ol\n margin-top: 0.5rem\n margin-bottom: 0.5rem\n\nol\n &.arabic\n list-style: decimal\n &.loweralpha\n list-style: lower-alpha\n &.upperalpha\n list-style: upper-alpha\n &.lowerroman\n list-style: lower-roman\n &.upperroman\n list-style: upper-roman\n\n// Don't space lists out when they're \"simple\" or in a `.. toctree::`\n.simple,\n.toctree-wrapper\n li\n > ul,\n > ol\n margin-top: 0\n margin-bottom: 0\n\n// Definition Lists\n.field-list,\n.option-list,\ndl:not([class]),\ndl.simple,\ndl.footnote,\ndl.glossary\n dt\n font-weight: 500\n margin-top: 0.25rem\n + dt\n margin-top: 0\n\n .classifier::before\n content: \":\"\n margin-left: 0.2rem\n margin-right: 0.2rem\n\n dd\n > p:first-child,\n ul\n margin-top: 0.125rem\n\n ul\n margin-bottom: 0.125rem\n",".math-wrapper\n width: 100%\n overflow-x: auto\n\ndiv.math\n position: relative\n text-align: center\n\n .headerlink,\n &:focus .headerlink\n display: none\n\n &:hover .headerlink\n display: inline-block\n\n span.eqno\n position: absolute\n right: 0.5rem\n top: 50%\n transform: translate(0, -50%)\n z-index: 1\n","// Abbreviations\nabbr[title]\n cursor: help\n\n// \"Problematic\" content, as identified by Sphinx\n.problematic\n color: var(--color-problematic)\n\n// Keyboard / Mouse \"instructions\"\nkbd:not(.compound)\n margin: 0 0.2rem\n padding: 0 0.2rem\n border-radius: 0.2rem\n border: 1px solid var(--color-foreground-border)\n color: var(--color-foreground-primary)\n vertical-align: text-bottom\n\n font-size: var(--font-size--small--3)\n display: inline-block\n\n box-shadow: 0 0.0625rem 0 rgba(0, 0, 0, 0.2), inset 0 0 0 0.125rem var(--color-background-primary)\n\n background-color: var(--color-background-secondary)\n\n// Blockquote\nblockquote\n border-left: 4px solid var(--color-background-border)\n background: var(--color-background-secondary)\n\n margin-left: 0\n margin-right: 0\n padding: 0.5rem 1rem\n\n .attribution\n font-weight: 600\n text-align: right\n\n &.pull-quote,\n &.highlights\n font-size: 1.25em\n\n &.epigraph,\n &.pull-quote\n border-left-width: 0\n border-radius: 0.5rem\n\n &.highlights\n border-left-width: 0\n background: transparent\n\n// Center align embedded-in-text images\np .reference img\n vertical-align: middle\n","p.rubric\n line-height: 1.25\n font-weight: bold\n font-size: 1.125em\n\n // For Numpy-style documentation that's got rubrics within it.\n // https://github.com/pradyunsg/furo/discussions/505\n dd &\n line-height: inherit\n font-weight: inherit\n\n font-size: var(--font-size--small)\n text-transform: uppercase\n","article .sidebar\n float: right\n clear: right\n width: 30%\n\n margin-left: 1rem\n margin-right: 0\n\n border-radius: 0.2rem\n background-color: var(--color-background-secondary)\n border: 
var(--color-background-border) 1px solid\n\n > *\n padding-left: 1rem\n padding-right: 1rem\n\n > ul, > ol // lists need additional padding, because bullets.\n padding-left: 2.2rem\n\n .sidebar-title\n margin: 0\n padding: 0.5rem 1rem\n border-bottom: var(--color-background-border) 1px solid\n\n font-weight: 500\n\n// TODO: subtitle\n// TODO: dedicated variables?\n",".table-wrapper\n width: 100%\n overflow-x: auto\n margin-top: 1rem\n margin-bottom: 0.5rem\n padding: 0.2rem 0.2rem 0.75rem\n\ntable.docutils\n border-radius: 0.2rem\n border-spacing: 0\n border-collapse: collapse\n\n box-shadow: 0 0.2rem 0.5rem rgba(0, 0, 0, 0.05), 0 0 0.0625rem rgba(0, 0, 0, 0.1)\n\n th\n background: var(--color-table-header-background)\n\n td,\n th\n // Space things out properly\n padding: 0 0.25rem\n\n // Get the borders looking just-right.\n border-left: 1px solid var(--color-table-border)\n border-right: 1px solid var(--color-table-border)\n border-bottom: 1px solid var(--color-table-border)\n\n p\n margin: 0.25rem\n\n &:first-child\n border-left: none\n &:last-child\n border-right: none\n\n // MyST-parser tables set these classes for control of column alignment\n &.text-left\n text-align: left\n &.text-right\n text-align: right\n &.text-center\n text-align: center\n",":target\n scroll-margin-top: 0.5rem\n\n@media (max-width: $full-width - $sidebar-width)\n :target\n scroll-margin-top: calc(0.5rem + var(--header-height))\n\n // When a heading is selected\n section > span:target\n scroll-margin-top: calc(0.8rem + var(--header-height))\n\n// Permalinks\n.headerlink\n font-weight: 100\n user-select: none\n\nh1,\nh2,\nh3,\nh4,\nh5,\nh6,\ndl dt,\np.caption,\nfigcaption p,\ntable > caption,\n.code-block-caption\n > .headerlink\n margin-left: 0.5rem\n visibility: hidden\n &:hover > .headerlink\n visibility: visible\n\n // Don't change to link-like, if someone adds the contents directive.\n > .toc-backref\n color: inherit\n text-decoration-line: none\n\n// Figure and table captions are special.\nfigure:hover > figcaption > p > .headerlink,\ntable:hover > caption > .headerlink\n visibility: visible\n\n:target >, // Regular section[id] style anchors\nspan:target ~ // Non-regular span[id] style \"extra\" anchors\n h1,\n h2,\n h3,\n h4,\n h5,\n h6\n &:nth-of-type(1)\n background-color: var(--color-highlight-on-target)\n // .headerlink\n // visibility: visible\n code.literal\n background-color: transparent\n\ntable:target > caption,\nfigure:target\n background-color: var(--color-highlight-on-target)\n\n// Inline page contents\n.this-will-duplicate-information-and-it-is-still-useful-here li :target\n background-color: var(--color-highlight-on-target)\n\n// Code block permalinks\n.literal-block-wrapper:target .code-block-caption\n background-color: var(--color-highlight-on-target)\n\n// When a definition list item is selected\n//\n// There isn't really an alternative to !important here, due to the\n// high-specificity of API documentation's selector.\ndt:target\n background-color: var(--color-highlight-on-target) !important\n\n// When a footnote reference is selected\n.footnote > dt:target + dd,\n.footnote-reference:target\n background-color: var(--color-highlight-on-target)\n",".guilabel\n background-color: var(--color-guilabel-background)\n border: 1px solid var(--color-guilabel-border)\n color: var(--color-guilabel-text)\n\n padding: 0 0.3em\n border-radius: 0.5em\n font-size: 0.9em\n","// This file contains the styles used for stylizing the footer that's shown\n// below the content.\n\nfooter\n font-size: 
var(--font-size--small)\n display: flex\n flex-direction: column\n\n margin-top: 2rem\n\n// Bottom of page information\n.bottom-of-page\n display: flex\n align-items: center\n justify-content: space-between\n\n margin-top: 1rem\n padding-top: 1rem\n padding-bottom: 1rem\n\n color: var(--color-foreground-secondary)\n border-top: 1px solid var(--color-background-border)\n\n line-height: 1.5\n\n @media (max-width: $content-width)\n text-align: center\n flex-direction: column-reverse\n gap: 0.25rem\n\n .left-details\n font-size: var(--font-size--small)\n\n .right-details\n display: flex\n flex-direction: column\n gap: 0.25rem\n text-align: right\n\n .icons\n display: flex\n justify-content: flex-end\n gap: 0.25rem\n font-size: 1rem\n\n a\n text-decoration: none\n\n svg,\n img\n font-size: 1.125rem\n height: 1em\n width: 1em\n\n// Next/Prev page information\n.related-pages\n a\n display: flex\n align-items: center\n\n text-decoration: none\n &:hover .page-info .title\n text-decoration: underline\n color: var(--color-link)\n text-decoration-color: var(--color-link-underline)\n\n svg.furo-related-icon,\n svg.furo-related-icon > use\n flex-shrink: 0\n\n color: var(--color-foreground-border)\n\n width: 0.75rem\n height: 0.75rem\n margin: 0 0.5rem\n\n &.next-page\n max-width: 50%\n\n float: right\n clear: right\n text-align: right\n\n &.prev-page\n max-width: 50%\n\n float: left\n clear: left\n\n svg\n transform: rotate(180deg)\n\n.page-info\n display: flex\n flex-direction: column\n overflow-wrap: anywhere\n\n .next-page &\n align-items: flex-end\n\n .context\n display: flex\n align-items: center\n\n padding-bottom: 0.1rem\n\n color: var(--color-foreground-muted)\n font-size: var(--font-size--small)\n text-decoration: none\n","//\n// Search Page Listing\n//\nul.search\n padding-left: 0\n list-style: none\n\n li\n padding: 1rem 0\n border-bottom: 1px solid var(--color-background-border)\n\n//\n// Highlighted by links in search page\n//\n[role=main] .highlighted\n background-color: var(--color-highlighted-background)\n color: var(--color-highlighted-text)\n","// This file contains the styles for the contents of the left sidebar, which\n// contains the navigation tree, logo, search etc.\n\n////////////////////////////////////////////////////////////////////////////////\n// Brand on top of the scrollable tree.\n////////////////////////////////////////////////////////////////////////////////\n.sidebar-brand\n display: flex\n flex-direction: column\n flex-shrink: 0\n\n padding: var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal)\n text-decoration: none\n\n.sidebar-brand-text\n color: var(--color-sidebar-brand-text)\n overflow-wrap: break-word\n margin: var(--sidebar-item-spacing-vertical) 0\n font-size: 1.5rem\n\n.sidebar-logo-container\n margin: var(--sidebar-item-spacing-vertical) 0\n\n.sidebar-logo\n margin: 0 auto\n display: block\n max-width: 100%\n\n////////////////////////////////////////////////////////////////////////////////\n// Search\n////////////////////////////////////////////////////////////////////////////////\n.sidebar-search-container\n display: flex\n align-items: center\n margin-top: var(--sidebar-search-space-above)\n\n position: relative\n\n background: var(--color-sidebar-search-background)\n &:hover,\n &:focus-within\n background: var(--color-sidebar-search-background--focus)\n\n &::before\n content: \"\"\n position: absolute\n left: var(--sidebar-item-spacing-horizontal)\n width: var(--sidebar-search-icon-size)\n height: var(--sidebar-search-icon-size)\n\n 
background-color: var(--color-sidebar-search-icon)\n mask-image: var(--icon-search)\n\n.sidebar-search\n box-sizing: border-box\n\n border: none\n border-top: 1px solid var(--color-sidebar-search-border)\n border-bottom: 1px solid var(--color-sidebar-search-border)\n\n padding-top: var(--sidebar-search-input-spacing-vertical)\n padding-bottom: var(--sidebar-search-input-spacing-vertical)\n padding-right: var(--sidebar-search-input-spacing-horizontal)\n padding-left: calc(var(--sidebar-item-spacing-horizontal) + var(--sidebar-search-input-spacing-horizontal) + var(--sidebar-search-icon-size))\n\n width: 100%\n\n color: var(--color-sidebar-search-foreground)\n background: transparent\n z-index: 10\n\n &:focus\n outline: none\n\n &::placeholder\n font-size: var(--sidebar-search-input-font-size)\n\n//\n// Hide Search Matches link\n//\n#searchbox .highlight-link\n padding: var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal) 0\n margin: 0\n text-align: center\n\n a\n color: var(--color-sidebar-search-icon)\n font-size: var(--font-size--small--2)\n\n////////////////////////////////////////////////////////////////////////////////\n// Structure/Skeleton of the navigation tree (left)\n////////////////////////////////////////////////////////////////////////////////\n.sidebar-tree\n font-size: var(--sidebar-item-font-size)\n margin-top: var(--sidebar-tree-space-above)\n margin-bottom: var(--sidebar-item-spacing-vertical)\n\n ul\n padding: 0\n margin-top: 0\n margin-bottom: 0\n\n display: flex\n flex-direction: column\n\n list-style: none\n\n li\n position: relative\n margin: 0\n\n > ul\n margin-left: var(--sidebar-item-spacing-horizontal)\n\n .icon\n color: var(--color-sidebar-link-text)\n\n .reference\n box-sizing: border-box\n color: var(--color-sidebar-link-text)\n\n // Fill the parent.\n display: inline-block\n line-height: var(--sidebar-item-line-height)\n text-decoration: none\n\n // Don't allow long words to cause wrapping.\n overflow-wrap: anywhere\n\n height: 100%\n width: 100%\n\n padding: var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal)\n\n &:hover\n background: var(--color-sidebar-item-background--hover)\n\n // Add a nice little \"external-link\" arrow here.\n &.external::after\n content: url('data:image/svg+xml,')\n margin: 0 0.25rem\n vertical-align: middle\n color: var(--color-sidebar-link-text)\n\n // Make the current page reference bold.\n .current-page > .reference\n font-weight: bold\n\n label\n position: absolute\n top: 0\n right: 0\n height: var(--sidebar-item-height)\n width: var(--sidebar-expander-width)\n\n cursor: pointer\n user-select: none\n\n display: flex\n justify-content: center\n align-items: center\n\n .caption, :not(.caption) > .caption-text\n font-size: var(--sidebar-caption-font-size)\n color: var(--color-sidebar-caption-text)\n\n font-weight: bold\n text-transform: uppercase\n\n margin: var(--sidebar-caption-space-above) 0 0 0\n padding: var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal)\n\n // If it has children, add a bit more padding to wrap the content to avoid\n // overlapping with the