Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/autotune_process.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/bounds.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/comm_analysis.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/comms.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/compile_fx.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/config.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/constant_folding.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/coordinate_descent_tuner.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/cudagraph_trees.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/debug.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/decomposition.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/exc.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/freezing.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/graph.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/hooks.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/index_propagation.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/metrics.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/pattern_matcher.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/sizevars.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/test_operators.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/triton_helpers.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/triton_heuristics.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/virtualized.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/wrapper_benchmark.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/common.py +1295 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cpp.py +0 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cpp_prefix.h +410 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__init__.py +0 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/cuda_kernel.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cuda_cpp_scheduling.py +212 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cuda_env.py +45 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cuda_template.py +241 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_lib_extensions/__pycache__/gemm_operation_extensions.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_lib_extensions/gemm_operation_extensions.py +186 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_utils.py +257 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/gemm_template.py +706 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/triton.py +0 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/triton_foreach.py +249 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/wrapper.py +0 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/__init__.py +1 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/bmm.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/conv.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/mm_common.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/mm_plus_mm.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/unpack_mixed_mm.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/conv.py +489 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/mm.py +305 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/mm_common.py +222 -0
- evalkit_tf437/lib/python3.10/site-packages/torch/_lazy/__pycache__/tensor_factory_functions.cpython-310.pyc +0 -0
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/autotune_process.cpython-310.pyc
ADDED
|
Binary file (17.2 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/bounds.cpython-310.pyc
ADDED
|
Binary file (4.47 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/comm_analysis.cpython-310.pyc
ADDED
|
Binary file (4.48 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/comms.cpython-310.pyc
ADDED
|
Binary file (10.1 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/compile_fx.cpython-310.pyc
ADDED
|
Binary file (31.5 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/config.cpython-310.pyc
ADDED
|
Binary file (8.94 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/constant_folding.cpython-310.pyc
ADDED
|
Binary file (5.15 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/coordinate_descent_tuner.cpython-310.pyc
ADDED
|
Binary file (7.25 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/cudagraph_trees.cpython-310.pyc
ADDED
|
Binary file (60.7 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/debug.cpython-310.pyc
ADDED
|
Binary file (16.7 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/decomposition.cpython-310.pyc
ADDED
|
Binary file (15.2 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/exc.cpython-310.pyc
ADDED
|
Binary file (4.3 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/freezing.cpython-310.pyc
ADDED
|
Binary file (9.3 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/graph.cpython-310.pyc
ADDED
|
Binary file (30.3 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/hooks.cpython-310.pyc
ADDED
|
Binary file (615 Bytes). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/index_propagation.cpython-310.pyc
ADDED
|
Binary file (10.4 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/metrics.cpython-310.pyc
ADDED
|
Binary file (4.7 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/pattern_matcher.cpython-310.pyc
ADDED
|
Binary file (46.2 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/sizevars.cpython-310.pyc
ADDED
|
Binary file (19.6 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/test_operators.cpython-310.pyc
ADDED
|
Binary file (1.26 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/triton_helpers.cpython-310.pyc
ADDED
|
Binary file (4.14 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/triton_heuristics.cpython-310.pyc
ADDED
|
Binary file (29.5 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/virtualized.cpython-310.pyc
ADDED
|
Binary file (14.3 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/__pycache__/wrapper_benchmark.cpython-310.pyc
ADDED
|
Binary file (8.88 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/common.py
ADDED
|
@@ -0,0 +1,1295 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import contextlib
|
| 2 |
+
import dataclasses
|
| 3 |
+
import functools
|
| 4 |
+
import itertools
|
| 5 |
+
import logging
|
| 6 |
+
import operator
|
| 7 |
+
import re
|
| 8 |
+
from collections import namedtuple
|
| 9 |
+
from itertools import chain
|
| 10 |
+
from typing import (
|
| 11 |
+
Any,
|
| 12 |
+
Callable,
|
| 13 |
+
ClassVar,
|
| 14 |
+
Dict,
|
| 15 |
+
List,
|
| 16 |
+
NamedTuple,
|
| 17 |
+
Optional,
|
| 18 |
+
Set,
|
| 19 |
+
Tuple,
|
| 20 |
+
Union,
|
| 21 |
+
)
|
| 22 |
+
|
| 23 |
+
import sympy
|
| 24 |
+
from sympy.printing.printer import Printer
|
| 25 |
+
|
| 26 |
+
import torch
|
| 27 |
+
import torch.fx
|
| 28 |
+
from torch.utils._sympy.value_ranges import ValueRanges
|
| 29 |
+
|
| 30 |
+
from .. import config, metrics
|
| 31 |
+
from ..utils import (
|
| 32 |
+
DeferredLineBase,
|
| 33 |
+
do_bench,
|
| 34 |
+
free_symbol_startswith,
|
| 35 |
+
IndentedBuffer,
|
| 36 |
+
sympy_dot,
|
| 37 |
+
sympy_subs,
|
| 38 |
+
sympy_symbol,
|
| 39 |
+
unique,
|
| 40 |
+
)
|
| 41 |
+
from ..virtualized import ops, OpsValue, V
|
| 42 |
+
|
| 43 |
+
schedule_log = torch._logging.getArtifactLogger(__name__, "schedule")
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def data_type_logger(msg):
|
| 47 |
+
if schedule_log.isEnabledFor(logging.DEBUG):
|
| 48 |
+
schedule_log.debug("Data type propagation: %s", msg)
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
TensorArg = namedtuple("TensorArg", ["name", "buffer", "dtype", "check_alignment"])
|
| 52 |
+
SizeArg = namedtuple("SizeArg", ["name", "expr"])
|
| 53 |
+
|
| 54 |
+
DeviceCodegen = namedtuple("DeviceCodegen", ["scheduling", "wrapper_codegen"])
|
| 55 |
+
device_codegens: Dict[str, DeviceCodegen] = {}
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
# The code generated by Inductor consists of two main parts: kernel code and wrapper code.
|
| 59 |
+
# For any new backend looking to integrate with Inductor, customization of these two main
|
| 60 |
+
# parts are necessary to generate its specific code.
|
| 61 |
+
#
|
| 62 |
+
# Kernel code generation is determined by different Scheduling. Consequently, a new
|
| 63 |
+
# backend needs to provide a custom Scheduling for its unique kernel code generation. Currently,
|
| 64 |
+
# CppScheduling and TritonScheduling serve the C++/OpenMP and Triton backends, respectively.
|
| 65 |
+
#
|
| 66 |
+
# For the Wrapper, Inductor provides a WrapperCodeGen class to generate the Python wrapper code
|
| 67 |
+
# that bridges kernels. This allows out-of-tree backends to inherit from WrapperCodeGen,
|
| 68 |
+
# and override specific member functions to create backend-specific Python wrapper code.
|
| 69 |
+
#
|
| 70 |
+
# Other classes, such as CppKernel and TritonKernel, used for code generation, typically form part
|
| 71 |
+
# of the logic for either Scheduling or WrapperCodeGen. So the Scheduling and WrapperCodeGen interfaces
|
| 72 |
+
# provide flexibility to the backend. A backend can choose to implement these classes from scratch,
|
| 73 |
+
# or reuse them by extending and overriding as necessary. And Inductor provides the registration API,
|
| 74 |
+
# register_backend_for_device, to equip a new backend at runtime.
|
| 75 |
+
#
|
| 76 |
+
# Intel has developed a new backend on top of Triton to support Intel GPUs, leveraging these interfaces.
|
| 77 |
+
# This backend can be used as a reference:
|
| 78 |
+
# https://github.com/intel/intel-extension-for-pytorch/blob/5dcc9d57e5422cf295e1a1ee97896d6b6a554a85/intel_extension_for_pytorch/_inductor/__init__.py#L9
|
| 79 |
+
def register_backend_for_device(
|
| 80 |
+
device: str, device_scheduling: type, device_wrapper_codegen: type
|
| 81 |
+
):
|
| 82 |
+
device_codegens[device] = DeviceCodegen(device_scheduling, device_wrapper_codegen)
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def get_scheduling_for_device(device: str):
|
| 86 |
+
return device_codegens[device].scheduling if device in device_codegens else None
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
def get_wrapper_codegen_for_device(device: str):
|
| 90 |
+
return (
|
| 91 |
+
device_codegens[device].wrapper_codegen if device in device_codegens else None
|
| 92 |
+
)
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def index_prevent_reordering(index: List[sympy.Expr], index_vars, sizes):
|
| 96 |
+
from ..ir import FlexibleLayout
|
| 97 |
+
|
| 98 |
+
# added contiguous index prevents reordering
|
| 99 |
+
return [*index, sympy_dot(index_vars, FlexibleLayout.contiguous_strides(sizes))]
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
@functools.lru_cache(None)
|
| 103 |
+
def boolean_ops():
|
| 104 |
+
return (
|
| 105 |
+
"is_inf",
|
| 106 |
+
"is_nan",
|
| 107 |
+
"bitwise_xor",
|
| 108 |
+
"logical_not",
|
| 109 |
+
"signbit",
|
| 110 |
+
"le",
|
| 111 |
+
"lt",
|
| 112 |
+
"ge",
|
| 113 |
+
"gt",
|
| 114 |
+
"eq",
|
| 115 |
+
"ne",
|
| 116 |
+
)
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
DTYPE_TO_COMPUTATION_DTYPE = {
|
| 120 |
+
torch.bfloat16: torch.float,
|
| 121 |
+
torch.float16: torch.float,
|
| 122 |
+
**{
|
| 123 |
+
dtype: dtype
|
| 124 |
+
for dtype in [
|
| 125 |
+
torch.bool,
|
| 126 |
+
torch.float32,
|
| 127 |
+
torch.float64,
|
| 128 |
+
torch.int8,
|
| 129 |
+
torch.int16,
|
| 130 |
+
torch.int32,
|
| 131 |
+
torch.int64,
|
| 132 |
+
torch.uint8,
|
| 133 |
+
]
|
| 134 |
+
},
|
| 135 |
+
}
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
class DataTypePropagation:
|
| 139 |
+
def __init__(self, body) -> None:
|
| 140 |
+
self.body = body
|
| 141 |
+
self.graphs: Dict[Union[Callable[..., Any], str], Any] = {
|
| 142 |
+
"root": body.root_block.graph
|
| 143 |
+
}
|
| 144 |
+
for k, v in body.subblocks.items():
|
| 145 |
+
self.graphs[k] = v.graph
|
| 146 |
+
|
| 147 |
+
def deduce_node_dtype_by_inputs(self, node: torch.fx.Node):
|
| 148 |
+
inputs = node.all_input_nodes
|
| 149 |
+
input_nodes = [
|
| 150 |
+
n for n in inputs if isinstance(n, torch.fx.Node) and n.op != "placeholder"
|
| 151 |
+
]
|
| 152 |
+
if len(input_nodes) == 0:
|
| 153 |
+
return None
|
| 154 |
+
|
| 155 |
+
all_input_nodes_propogated = all(
|
| 156 |
+
OptimizationContext.key in n.meta
|
| 157 |
+
and n.meta[OptimizationContext.key].dtype is not None
|
| 158 |
+
for n in input_nodes
|
| 159 |
+
)
|
| 160 |
+
if not all_input_nodes_propogated:
|
| 161 |
+
return None
|
| 162 |
+
|
| 163 |
+
return functools.reduce(
|
| 164 |
+
torch.promote_types,
|
| 165 |
+
[n.meta[OptimizationContext.key].dtype for n in input_nodes],
|
| 166 |
+
)
|
| 167 |
+
|
| 168 |
+
def deduce_node_dtype_by_subgraph(self, node: torch.fx.Node):
|
| 169 |
+
sub_graph = self.graphs[node.target]
|
| 170 |
+
dtype = self.propagate_graph(sub_graph)
|
| 171 |
+
assert dtype
|
| 172 |
+
return dtype
|
| 173 |
+
|
| 174 |
+
def deduce_node_dtype(self, node: torch.fx.Node):
|
| 175 |
+
if node.target in boolean_ops():
|
| 176 |
+
return torch.bool
|
| 177 |
+
|
| 178 |
+
if node.op == "placeholder":
|
| 179 |
+
return None
|
| 180 |
+
|
| 181 |
+
if node.target == "output":
|
| 182 |
+
# we can infer output node if it only have 1 arg
|
| 183 |
+
if len(node.args) != 1:
|
| 184 |
+
return None
|
| 185 |
+
|
| 186 |
+
if node.target in (
|
| 187 |
+
"to_dtype",
|
| 188 |
+
"index_expr",
|
| 189 |
+
):
|
| 190 |
+
return node.args[-1]
|
| 191 |
+
|
| 192 |
+
if node.target in (
|
| 193 |
+
"rand",
|
| 194 |
+
"randn",
|
| 195 |
+
):
|
| 196 |
+
return torch.float
|
| 197 |
+
|
| 198 |
+
if node.target in (
|
| 199 |
+
"get_index",
|
| 200 |
+
"index_expr",
|
| 201 |
+
):
|
| 202 |
+
return torch.int64
|
| 203 |
+
|
| 204 |
+
if node.target in (
|
| 205 |
+
"load",
|
| 206 |
+
"store",
|
| 207 |
+
"store_reduction",
|
| 208 |
+
):
|
| 209 |
+
buf_name = node.args[1]
|
| 210 |
+
return V.graph.get_dtype(buf_name)
|
| 211 |
+
|
| 212 |
+
if node.target == operator.getitem:
|
| 213 |
+
return self.deduce_node_dtype(node.args[0])
|
| 214 |
+
|
| 215 |
+
assert isinstance(node.target, str)
|
| 216 |
+
|
| 217 |
+
if node.target == "reduction":
|
| 218 |
+
return node.args[1]
|
| 219 |
+
|
| 220 |
+
if node.target == "constant":
|
| 221 |
+
return DTYPE_TO_COMPUTATION_DTYPE[node.args[-1]]
|
| 222 |
+
|
| 223 |
+
if node.target.startswith("masked_subblock"):
|
| 224 |
+
return self.deduce_node_dtype_by_subgraph(node)
|
| 225 |
+
|
| 226 |
+
return self.deduce_node_dtype_by_inputs(node)
|
| 227 |
+
|
| 228 |
+
def propagate_graph(self, graph: torch.fx.Graph):
|
| 229 |
+
assert graph.nodes
|
| 230 |
+
graph_dtype = None
|
| 231 |
+
# For masked_subblock, we use output's dtype to represent
|
| 232 |
+
# the dtype of this subgraph. For other cases, graph_dtype
|
| 233 |
+
# might be None
|
| 234 |
+
for node in graph.nodes:
|
| 235 |
+
if OptimizationContext.key in node.meta:
|
| 236 |
+
opt_ctx = node.meta[OptimizationContext.key]
|
| 237 |
+
else:
|
| 238 |
+
opt_ctx = OptimizationContext()
|
| 239 |
+
|
| 240 |
+
opt_ctx.dtype = self.deduce_node_dtype(node)
|
| 241 |
+
node.meta[OptimizationContext.key] = opt_ctx
|
| 242 |
+
if node.target == "output":
|
| 243 |
+
graph_dtype = opt_ctx.dtype
|
| 244 |
+
return graph_dtype
|
| 245 |
+
|
| 246 |
+
def propagate(self):
|
| 247 |
+
self.propagate_graph(self.graphs["root"])
|
| 248 |
+
|
| 249 |
+
@classmethod
|
| 250 |
+
def propagate_loopbody(cls, body):
|
| 251 |
+
return cls(body).propagate()
|
| 252 |
+
|
| 253 |
+
@classmethod
|
| 254 |
+
def propagate_scheduler_node(cls, node):
|
| 255 |
+
from ..ir import LoopBody
|
| 256 |
+
from ..scheduler import SchedulerNode
|
| 257 |
+
|
| 258 |
+
assert isinstance(node, SchedulerNode)
|
| 259 |
+
assert isinstance(node._body, LoopBody)
|
| 260 |
+
DataTypePropagation.propagate_loopbody(node._body)
|
| 261 |
+
|
| 262 |
+
|
| 263 |
+
class ExprPrinter(Printer):
|
| 264 |
+
@staticmethod
|
| 265 |
+
def paren(string):
|
| 266 |
+
def all_in_parens(string):
|
| 267 |
+
if string[0] != "(" or len(string) < 2:
|
| 268 |
+
return False
|
| 269 |
+
count = 1
|
| 270 |
+
for i, char in enumerate(string[1:]):
|
| 271 |
+
if char == "(":
|
| 272 |
+
count += 1
|
| 273 |
+
elif char == ")":
|
| 274 |
+
count -= 1
|
| 275 |
+
if count == 0 and i != len(string) - 2:
|
| 276 |
+
return False
|
| 277 |
+
assert count == 0
|
| 278 |
+
return True
|
| 279 |
+
|
| 280 |
+
if (
|
| 281 |
+
isinstance(string, CSEVariable)
|
| 282 |
+
or re.match(r"^[a-z0-9_.]+$", string, re.I)
|
| 283 |
+
or re.match(r"^\([^)]*\)$", string, re.I)
|
| 284 |
+
or string == ""
|
| 285 |
+
):
|
| 286 |
+
return string
|
| 287 |
+
# don't put extra parens for strings that are already wrapped in parens
|
| 288 |
+
if all_in_parens(string):
|
| 289 |
+
return string
|
| 290 |
+
return f"({string})"
|
| 291 |
+
|
| 292 |
+
def _print_Infinity(self, expr):
|
| 293 |
+
return "math.inf"
|
| 294 |
+
|
| 295 |
+
def _print_NegativeInfinity(self, expr):
|
| 296 |
+
return "-math.inf"
|
| 297 |
+
|
| 298 |
+
def _print_Relational(self, expr):
|
| 299 |
+
return f" {expr.rel_op} ".join(map(self.paren, map(self._print, expr.args)))
|
| 300 |
+
|
| 301 |
+
def _print_Mul(self, expr):
|
| 302 |
+
return "*".join(map(self.paren, map(self._print, expr.args)))
|
| 303 |
+
|
| 304 |
+
def _print_Add(self, expr):
|
| 305 |
+
return " + ".join(map(self.paren, map(self._print, expr.args)))
|
| 306 |
+
|
| 307 |
+
def _print_Mod(self, expr):
|
| 308 |
+
return " % ".join(map(self.paren, map(self._print, expr.args)))
|
| 309 |
+
|
| 310 |
+
def _print_FloorDiv(self, expr):
|
| 311 |
+
raise NotImplementedError(f"_print_FloorDiv not implemented for {type(self)}")
|
| 312 |
+
|
| 313 |
+
def _print_CleanDiv(self, expr):
|
| 314 |
+
return self._print_FloorDiv(expr)
|
| 315 |
+
|
| 316 |
+
def _print_GreaterThan(self, expr):
|
| 317 |
+
# GreaterThan: >=
|
| 318 |
+
# StrictlyGreaterThan: >
|
| 319 |
+
# Go figure...
|
| 320 |
+
return " >= ".join(map(self.paren, map(self._print, expr.args)))
|
| 321 |
+
|
| 322 |
+
def _print_align(self, expr):
|
| 323 |
+
assert len(expr.args) == 1
|
| 324 |
+
return f"align({self._print(expr.args[0])})"
|
| 325 |
+
|
| 326 |
+
|
| 327 |
+
class PythonPrinter(ExprPrinter):
|
| 328 |
+
def _print_ModularIndexing(self, expr):
|
| 329 |
+
x, div, mod = expr.args
|
| 330 |
+
x = self.paren(self.doprint(x))
|
| 331 |
+
div = self.paren(self.doprint(div))
|
| 332 |
+
mod = self.paren(self.doprint(mod))
|
| 333 |
+
if div != "1":
|
| 334 |
+
x = f"({x} // {div})"
|
| 335 |
+
return f"{x} % {mod}"
|
| 336 |
+
|
| 337 |
+
def _print_FloorDiv(self, expr):
|
| 338 |
+
x, div = expr.args
|
| 339 |
+
x = self.paren(self.doprint(x))
|
| 340 |
+
div = self.paren(self.doprint(div))
|
| 341 |
+
return f"({x} // {div})"
|
| 342 |
+
|
| 343 |
+
def _helper_sqrt(self, expr):
|
| 344 |
+
return f"math.sqrt({self._print(expr)})"
|
| 345 |
+
|
| 346 |
+
def _print_Pow(self, expr):
|
| 347 |
+
# Pow() confuses triton
|
| 348 |
+
base, exp = expr.args
|
| 349 |
+
# NB: Remember this is sizevar computation! You don't typically
|
| 350 |
+
# expect to have to do floating point computation including exponents
|
| 351 |
+
# in sizevar compute. Instead of adding support for floating
|
| 352 |
+
# point pow, you should make upstream retranslate the Sympy expression
|
| 353 |
+
# into Tensor expressions earlier and do that instead.
|
| 354 |
+
if exp == 0.5:
|
| 355 |
+
return self._helper_sqrt(base)
|
| 356 |
+
elif exp == -0.5:
|
| 357 |
+
return "1/" + self._helper_sqrt(base)
|
| 358 |
+
base = self._print(base)
|
| 359 |
+
assert exp == int(exp), exp
|
| 360 |
+
exp = int(exp)
|
| 361 |
+
if exp > 0:
|
| 362 |
+
return "*".join([self.paren(base)] * exp)
|
| 363 |
+
elif exp < 0:
|
| 364 |
+
return "1/" + self.paren("*".join([self.paren(base)] * abs(exp)))
|
| 365 |
+
else: # exp == 0
|
| 366 |
+
return "1"
|
| 367 |
+
|
| 368 |
+
def _print_floor(self, expr):
|
| 369 |
+
assert len(expr.args) == 1
|
| 370 |
+
return f"math.floor({self._print(expr.args[0])})"
|
| 371 |
+
|
| 372 |
+
def _print_ceiling(self, expr):
|
| 373 |
+
assert len(expr.args) == 1
|
| 374 |
+
return f"math.ceil({self._print(expr.args[0])})"
|
| 375 |
+
|
| 376 |
+
def _print_Abs(self, expr):
|
| 377 |
+
assert len(expr.args) == 1
|
| 378 |
+
return f"abs({self._print(expr.args[0])})"
|
| 379 |
+
|
| 380 |
+
def _print_Max(self, expr):
|
| 381 |
+
assert len(expr.args) >= 2
|
| 382 |
+
return f"max({', '.join(map(self._print, expr.args))})"
|
| 383 |
+
|
| 384 |
+
def _print_Min(self, expr):
|
| 385 |
+
assert len(expr.args) >= 2
|
| 386 |
+
return f"min({', '.join(map(self._print, expr.args))})"
|
| 387 |
+
|
| 388 |
+
|
| 389 |
+
class OpOverrides:
|
| 390 |
+
def __init__(self, parent):
|
| 391 |
+
super().__init__()
|
| 392 |
+
self._parent = parent
|
| 393 |
+
|
| 394 |
+
def __getattr__(self, item):
|
| 395 |
+
return getattr(self._parent, item)
|
| 396 |
+
|
| 397 |
+
@staticmethod
|
| 398 |
+
def identity(value):
|
| 399 |
+
# used to trigger cse
|
| 400 |
+
return value
|
| 401 |
+
|
| 402 |
+
@staticmethod
|
| 403 |
+
def constant(value, dtype):
|
| 404 |
+
return repr(value)
|
| 405 |
+
|
| 406 |
+
@staticmethod
|
| 407 |
+
def reciprocal(x):
|
| 408 |
+
return ops.truediv("1", x)
|
| 409 |
+
|
| 410 |
+
@staticmethod
|
| 411 |
+
def square(x):
|
| 412 |
+
return ops.mul(x, x)
|
| 413 |
+
|
| 414 |
+
@staticmethod
|
| 415 |
+
def bitwise_not(x):
|
| 416 |
+
return f"~{ExprPrinter.paren(x)}"
|
| 417 |
+
|
| 418 |
+
@staticmethod
|
| 419 |
+
def logical_not(a):
|
| 420 |
+
return f"{ExprPrinter.paren(a)} == 0"
|
| 421 |
+
|
| 422 |
+
@staticmethod
|
| 423 |
+
def bitwise_and(x, y):
|
| 424 |
+
return f"{ExprPrinter.paren(x)} & {ExprPrinter.paren(y)}"
|
| 425 |
+
|
| 426 |
+
@staticmethod
|
| 427 |
+
def bitwise_or(x, y):
|
| 428 |
+
return f"{ExprPrinter.paren(x)} | {ExprPrinter.paren(y)}"
|
| 429 |
+
|
| 430 |
+
@staticmethod
|
| 431 |
+
def bitwise_xor(x, y):
|
| 432 |
+
return f"{ExprPrinter.paren(x)} ^ {ExprPrinter.paren(y)}"
|
| 433 |
+
|
| 434 |
+
@staticmethod
|
| 435 |
+
def bitwise_left_shift(x, y):
|
| 436 |
+
return f"{ExprPrinter.paren(x)} << {ExprPrinter.paren(y)}"
|
| 437 |
+
|
| 438 |
+
# TODO(fdrocha): this is currently not being used anywhere,
|
| 439 |
+
# pending on moving triton pin past 972b761
|
| 440 |
+
@staticmethod
|
| 441 |
+
def bitwise_right_shift(x, y):
|
| 442 |
+
return f"{ExprPrinter.paren(x)} >> {ExprPrinter.paren(y)}"
|
| 443 |
+
|
| 444 |
+
@staticmethod
|
| 445 |
+
def remainder(a, b):
|
| 446 |
+
r = ops.mod(a, b)
|
| 447 |
+
return ops.where(f"(({r} != 0) & (({r} < 0) != ({b} < 0)))", ops.add(r, b), r)
|
| 448 |
+
|
| 449 |
+
@staticmethod
|
| 450 |
+
def load_seed(name, offset):
|
| 451 |
+
return ops.load(name, sympy.Integer(offset))
|
| 452 |
+
|
| 453 |
+
|
| 454 |
+
class DeferredLine(DeferredLineBase):
|
| 455 |
+
"""A line that can be 'unwritten' by adding name to V.graph.removed_buffers"""
|
| 456 |
+
|
| 457 |
+
def __init__(self, name, line):
|
| 458 |
+
super().__init__(line)
|
| 459 |
+
self.name = name
|
| 460 |
+
|
| 461 |
+
def __call__(self):
|
| 462 |
+
if all(
|
| 463 |
+
self.name not in x
|
| 464 |
+
for x in (
|
| 465 |
+
V.graph.removed_buffers,
|
| 466 |
+
V.kernel.removed_buffers,
|
| 467 |
+
V.graph.inplaced_to_remove,
|
| 468 |
+
V.kernel.inplaced_to_remove,
|
| 469 |
+
)
|
| 470 |
+
):
|
| 471 |
+
return self.line
|
| 472 |
+
return None
|
| 473 |
+
|
| 474 |
+
def _new_line(self, line):
|
| 475 |
+
return DeferredLine(self.name, line)
|
| 476 |
+
|
| 477 |
+
|
| 478 |
+
class BracesBuffer(IndentedBuffer):
|
| 479 |
+
def indent(self, offset=1):
|
| 480 |
+
@contextlib.contextmanager
|
| 481 |
+
def ctx():
|
| 482 |
+
for _ in range(offset):
|
| 483 |
+
self.writeline("{")
|
| 484 |
+
self._indent += 1
|
| 485 |
+
for _ in range(-offset):
|
| 486 |
+
self._indent -= 1
|
| 487 |
+
self.writeline("}")
|
| 488 |
+
yield
|
| 489 |
+
for _ in range(-offset):
|
| 490 |
+
self.writeline("{")
|
| 491 |
+
self._indent += 1
|
| 492 |
+
for _ in range(offset):
|
| 493 |
+
self._indent -= 1
|
| 494 |
+
self.writeline("}")
|
| 495 |
+
|
| 496 |
+
return ctx()
|
| 497 |
+
|
| 498 |
+
|
| 499 |
+
class InplacedBuffer(NamedTuple):
|
| 500 |
+
inner_name: str
|
| 501 |
+
other_names: List[str]
|
| 502 |
+
|
| 503 |
+
|
| 504 |
+
class KernelArgs:
|
| 505 |
+
@staticmethod
|
| 506 |
+
def _lookup(prefix, odict, name):
|
| 507 |
+
assert isinstance(name, (str, sympy.Symbol))
|
| 508 |
+
if name not in odict:
|
| 509 |
+
odict[name] = f"{prefix}{len(odict)}"
|
| 510 |
+
return odict[name]
|
| 511 |
+
|
| 512 |
+
def __init__(self, sizevars=None):
|
| 513 |
+
self.input_buffers = dict()
|
| 514 |
+
self.output_buffers = dict()
|
| 515 |
+
self.inplace_buffers = dict()
|
| 516 |
+
self.sizevars = sizevars or dict()
|
| 517 |
+
|
| 518 |
+
def __repr__(self):
|
| 519 |
+
return "KernelArgs({})".format(
|
| 520 |
+
", ".join(
|
| 521 |
+
map(
|
| 522 |
+
repr,
|
| 523 |
+
[
|
| 524 |
+
self.input_buffers,
|
| 525 |
+
self.output_buffers,
|
| 526 |
+
self.inplace_buffers,
|
| 527 |
+
self.sizevars,
|
| 528 |
+
],
|
| 529 |
+
)
|
| 530 |
+
)
|
| 531 |
+
)
|
| 532 |
+
|
| 533 |
+
def _buffer_is_marked_removed(self, name):
|
| 534 |
+
return isinstance(name, str) and name.startswith("REMOVED")
|
| 535 |
+
|
| 536 |
+
def input(self, name):
|
| 537 |
+
if V.graph.scheduler:
|
| 538 |
+
name = V.graph.scheduler.mutation_real_name.get(name, name)
|
| 539 |
+
assert name not in V.graph.removed_buffers, name
|
| 540 |
+
if name in self.output_buffers:
|
| 541 |
+
return self.output_buffers[name]
|
| 542 |
+
if name in self.inplace_buffers:
|
| 543 |
+
return self.inplace_buffers[name].inner_name
|
| 544 |
+
if name.startswith("seed"):
|
| 545 |
+
return self._lookup("seed", self.input_buffers, name)
|
| 546 |
+
return self._lookup("in_ptr", self.input_buffers, name)
|
| 547 |
+
|
| 548 |
+
def output(self, name):
|
| 549 |
+
if V.graph.scheduler:
|
| 550 |
+
name = V.graph.scheduler.mutation_real_name.get(name, name)
|
| 551 |
+
assert name not in V.graph.removed_buffers, name
|
| 552 |
+
if name in self.inplace_buffers:
|
| 553 |
+
return self.inplace_buffers[name].inner_name
|
| 554 |
+
return self._lookup("out_ptr", self.output_buffers, name)
|
| 555 |
+
|
| 556 |
+
def make_inplace(self, input_name, output_name):
|
| 557 |
+
assert output_name not in self.inplace_buffers
|
| 558 |
+
if input_name in self.inplace_buffers:
|
| 559 |
+
buf = self.inplace_buffers[input_name]
|
| 560 |
+
buf.other_names.append(output_name)
|
| 561 |
+
self.inplace_buffers[output_name] = buf
|
| 562 |
+
else:
|
| 563 |
+
buf = InplacedBuffer(
|
| 564 |
+
f"in_out_ptr{len(unique(self.inplace_buffers.values()))}",
|
| 565 |
+
[input_name, output_name],
|
| 566 |
+
)
|
| 567 |
+
self.inplace_buffers[input_name] = buf
|
| 568 |
+
self.inplace_buffers[output_name] = buf
|
| 569 |
+
|
| 570 |
+
def seed_offset(self, name, value):
|
| 571 |
+
if value in self.sizevars:
|
| 572 |
+
return self.sizevars[value]
|
| 573 |
+
if name in self.sizevars.values():
|
| 574 |
+
name = (
|
| 575 |
+
f"{name}{sum(1 for v in self.sizevars.values() if v.startswith(name))}"
|
| 576 |
+
)
|
| 577 |
+
self.sizevars[value] = name
|
| 578 |
+
return name
|
| 579 |
+
|
| 580 |
+
def size(self, name):
|
| 581 |
+
if str(name) == "seed":
|
| 582 |
+
self.sizevars["seed"] = "seed"
|
| 583 |
+
return "seed"
|
| 584 |
+
return self._lookup("ks", self.sizevars, name)
|
| 585 |
+
|
| 586 |
+
def call_names(self):
|
| 587 |
+
return chain(
|
| 588 |
+
self.input_buffers.keys(), self.output_buffers.keys(), self.sizevars.keys()
|
| 589 |
+
)
|
| 590 |
+
|
| 591 |
+
def wrap_ptr_arg(self, buf, dtype):
|
| 592 |
+
return f"c_void_p({buf}.data_ptr())"
|
| 593 |
+
|
| 594 |
+
def wrap_size_arg(self, size):
|
| 595 |
+
return f"c_long({size})"
|
| 596 |
+
|
| 597 |
+
def cpp_argdefs(self):
|
| 598 |
+
from .cpp import DTYPE_TO_CPP, INDEX_TYPE
|
| 599 |
+
|
| 600 |
+
call_args = []
|
| 601 |
+
arg_defs = []
|
| 602 |
+
arg_types = []
|
| 603 |
+
for inplaced in unique(self.inplace_buffers.values()):
|
| 604 |
+
if self._buffer_is_marked_removed(inplaced):
|
| 605 |
+
continue
|
| 606 |
+
outer = inplaced.other_names[-1]
|
| 607 |
+
inner = inplaced.inner_name
|
| 608 |
+
dtype = V.graph.get_dtype(outer)
|
| 609 |
+
cpp_dtype = DTYPE_TO_CPP[dtype]
|
| 610 |
+
arg_defs.append(f"{cpp_dtype}* {inner}")
|
| 611 |
+
call_args.append(self.wrap_ptr_arg(outer, dtype))
|
| 612 |
+
arg_types.append(f"{cpp_dtype}*")
|
| 613 |
+
for outer, inner in self.input_buffers.items():
|
| 614 |
+
if outer in self.inplace_buffers:
|
| 615 |
+
continue
|
| 616 |
+
dtype = V.graph.get_dtype(outer)
|
| 617 |
+
cpp_dtype = DTYPE_TO_CPP[dtype]
|
| 618 |
+
arg_defs.append(f"const {cpp_dtype}* {inner}")
|
| 619 |
+
call_args.append(self.wrap_ptr_arg(outer, dtype))
|
| 620 |
+
arg_types.append(f"const {cpp_dtype}*")
|
| 621 |
+
for outer, inner in self.output_buffers.items():
|
| 622 |
+
if outer in self.inplace_buffers or self._buffer_is_marked_removed(inner):
|
| 623 |
+
continue
|
| 624 |
+
dtype = V.graph.get_dtype(outer)
|
| 625 |
+
cpp_dtype = DTYPE_TO_CPP[dtype]
|
| 626 |
+
arg_defs.append(f"{cpp_dtype}* {inner}")
|
| 627 |
+
call_args.append(self.wrap_ptr_arg(outer, dtype))
|
| 628 |
+
arg_types.append(f"{cpp_dtype}*")
|
| 629 |
+
for outer, inner in self.sizevars.items():
|
| 630 |
+
arg_defs.append(f"const {INDEX_TYPE} {inner}")
|
| 631 |
+
call_args.append(self.wrap_size_arg(outer))
|
| 632 |
+
arg_types.append(f"const {INDEX_TYPE}")
|
| 633 |
+
return arg_defs, call_args, arg_types
|
| 634 |
+
|
| 635 |
+
def python_argdefs(self):
|
| 636 |
+
arg_defs = []
|
| 637 |
+
call_args = []
|
| 638 |
+
precompile_args: List[Union[TensorArg, SizeArg]] = []
|
| 639 |
+
for inplaced in unique(self.inplace_buffers.values()):
|
| 640 |
+
if self._buffer_is_marked_removed(inplaced):
|
| 641 |
+
continue
|
| 642 |
+
arg_defs.append(inplaced.inner_name)
|
| 643 |
+
call_args.append(inplaced.other_names[-1])
|
| 644 |
+
precompile_args.append(
|
| 645 |
+
TensorArg(
|
| 646 |
+
inplaced.inner_name,
|
| 647 |
+
inplaced.other_names[-1],
|
| 648 |
+
V.graph.get_dtype(inplaced.other_names[-1]),
|
| 649 |
+
True,
|
| 650 |
+
)
|
| 651 |
+
)
|
| 652 |
+
for outer, inner in chain(
|
| 653 |
+
self.input_buffers.items(), self.output_buffers.items()
|
| 654 |
+
):
|
| 655 |
+
if outer in self.inplace_buffers or self._buffer_is_marked_removed(inner):
|
| 656 |
+
continue
|
| 657 |
+
arg_defs.append(inner)
|
| 658 |
+
call_args.append(outer)
|
| 659 |
+
precompile_args.append(
|
| 660 |
+
TensorArg(inner, outer, V.graph.get_dtype(outer), True)
|
| 661 |
+
)
|
| 662 |
+
for outer, inner in self.sizevars.items():
|
| 663 |
+
arg_defs.append(inner)
|
| 664 |
+
call_args.append(outer)
|
| 665 |
+
precompile_args.append(SizeArg(inner, outer))
|
| 666 |
+
|
| 667 |
+
return arg_defs, call_args, precompile_args
|
| 668 |
+
|
| 669 |
+
def aliases(self):
|
| 670 |
+
for inplaced in unique(self.inplace_buffers.values()):
|
| 671 |
+
if self._buffer_is_marked_removed(inplaced):
|
| 672 |
+
continue
|
| 673 |
+
for other in inplaced.other_names:
|
| 674 |
+
if (
|
| 675 |
+
other in V.graph.inplaced_to_remove
|
| 676 |
+
or other in V.kernel.inplaced_to_remove
|
| 677 |
+
):
|
| 678 |
+
continue
|
| 679 |
+
if other in self.input_buffers:
|
| 680 |
+
yield self.input_buffers[other], inplaced.inner_name
|
| 681 |
+
if other in self.output_buffers:
|
| 682 |
+
yield self.output_buffers[other], inplaced.inner_name
|
| 683 |
+
|
| 684 |
+
def is_removed(self, name):
|
| 685 |
+
def _is_removed(name, buffers):
|
| 686 |
+
return name not in buffers or self._buffer_is_marked_removed(buffers[name])
|
| 687 |
+
|
| 688 |
+
return _is_removed(name, self.output_buffers) and _is_removed(
|
| 689 |
+
name, self.inplace_buffers
|
| 690 |
+
)
|
| 691 |
+
|
| 692 |
+
# Includes inplace buffers, excludes removed buffers. Essentially,
|
| 693 |
+
# after you do a call into this kernel, which buffers actually contain
|
| 694 |
+
# updated data? Modeled off of python_argdefs.
|
| 695 |
+
def live_output_buffers(self):
|
| 696 |
+
live_outs = set()
|
| 697 |
+
for inplaced in unique(self.inplace_buffers.values()):
|
| 698 |
+
if self._buffer_is_marked_removed(inplaced):
|
| 699 |
+
continue
|
| 700 |
+
live_outs.add(inplaced.other_names[-1])
|
| 701 |
+
for outer, inner in self.output_buffers.items():
|
| 702 |
+
if outer in self.inplace_buffers or self._buffer_is_marked_removed(inner):
|
| 703 |
+
continue
|
| 704 |
+
live_outs.add(outer)
|
| 705 |
+
return live_outs
|
| 706 |
+
|
| 707 |
+
|
| 708 |
+
class CSEVariable:
|
| 709 |
+
"""A CSEVariable is just a name for an expression but it is useful to be able to annotate them on a backend dependent basis.
|
| 710 |
+
To do so, the backends can simply overload `Kernel.create_cse_var`
|
| 711 |
+
The "CSEVariable.update_on_args" method gives you a hook for annotations
|
| 712 |
+
See example of TritonCSEVariable in triton.py
|
| 713 |
+
"""
|
| 714 |
+
|
| 715 |
+
def __init__(self, name, bounds: ValueRanges):
|
| 716 |
+
assert isinstance(bounds, ValueRanges)
|
| 717 |
+
self.name = name
|
| 718 |
+
self.bounds = bounds
|
| 719 |
+
|
| 720 |
+
def __str__(self):
|
| 721 |
+
return self.name
|
| 722 |
+
|
| 723 |
+
def __hash__(self) -> int:
|
| 724 |
+
return hash(self.name)
|
| 725 |
+
|
| 726 |
+
def __eq__(self, other) -> bool:
|
| 727 |
+
return type(other) == type(self) and other.name == self.name
|
| 728 |
+
|
| 729 |
+
def update_on_args(self, name, args, kwargs):
|
| 730 |
+
pass
|
| 731 |
+
|
| 732 |
+
|
| 733 |
+
class CppWrapperKernelArgs(KernelArgs):
|
| 734 |
+
def wrap_ptr_arg(self, buf, dtype):
|
| 735 |
+
from .cpp import DTYPE_TO_CPP
|
| 736 |
+
|
| 737 |
+
if config.aot_inductor.abi_compatible:
|
| 738 |
+
# In the abi_compatible model, we just return the buf here.
|
| 739 |
+
# We will form correct call args later in wrapper.generate_kernel_all.
|
| 740 |
+
return buf
|
| 741 |
+
else:
|
| 742 |
+
return f"({DTYPE_TO_CPP[dtype]}*)({buf}.data_ptr())"
|
| 743 |
+
|
| 744 |
+
def wrap_size_arg(self, size):
|
| 745 |
+
return f"{size}"
|
| 746 |
+
|
| 747 |
+
|
| 748 |
+
class CSE:
|
| 749 |
+
"""Common subexpression elimination"""
|
| 750 |
+
|
| 751 |
+
def __init__(
|
| 752 |
+
self,
|
| 753 |
+
prefix="",
|
| 754 |
+
suffix="",
|
| 755 |
+
name_prefix="tmp",
|
| 756 |
+
iter_buffers=None,
|
| 757 |
+
store_cache=None,
|
| 758 |
+
reduction_cache=None,
|
| 759 |
+
varname_map=None,
|
| 760 |
+
):
|
| 761 |
+
self.prefix = prefix
|
| 762 |
+
self.suffix = suffix
|
| 763 |
+
self.cache = {}
|
| 764 |
+
self.name_prefix = name_prefix
|
| 765 |
+
self.store_cache = store_cache or {}
|
| 766 |
+
self.reduction_cache = reduction_cache or {}
|
| 767 |
+
self.iter_buffer_ids = iter_buffers or itertools.count()
|
| 768 |
+
self.invalidated_stores = set()
|
| 769 |
+
self.varname_map = varname_map or {}
|
| 770 |
+
|
| 771 |
+
def invalidate(self, keep_vars: Set[str]):
|
| 772 |
+
for name, tmp in list(self.store_cache.items()):
|
| 773 |
+
if tmp not in keep_vars:
|
| 774 |
+
del self.store_cache[name]
|
| 775 |
+
self.invalidated_stores.add(name)
|
| 776 |
+
self.cache = {k: v for k, v in self.cache.items() if v in keep_vars}
|
| 777 |
+
|
| 778 |
+
def clone(self):
|
| 779 |
+
# Note(fdrocha): reduction_cache is not being cloned, not sure if this is intentional
|
| 780 |
+
return CSE(
|
| 781 |
+
prefix=self.prefix,
|
| 782 |
+
suffix=self.suffix,
|
| 783 |
+
name_prefix=self.name_prefix,
|
| 784 |
+
iter_buffers=self.iter_buffer_ids,
|
| 785 |
+
store_cache=self.store_cache,
|
| 786 |
+
varname_map=self.varname_map,
|
| 787 |
+
)
|
| 788 |
+
|
| 789 |
+
def generate(
|
| 790 |
+
self,
|
| 791 |
+
buffer: IndentedBuffer,
|
| 792 |
+
expr: Union[str, CSEVariable, OpsValue],
|
| 793 |
+
*,
|
| 794 |
+
bounds: ValueRanges = ValueRanges.unknown(),
|
| 795 |
+
write=True,
|
| 796 |
+
assignment=True,
|
| 797 |
+
) -> CSEVariable:
|
| 798 |
+
if isinstance(expr, OpsValue):
|
| 799 |
+
expr = expr.value
|
| 800 |
+
|
| 801 |
+
assert isinstance(expr, (str, CSEVariable)), type(expr)
|
| 802 |
+
assert write or assignment
|
| 803 |
+
if isinstance(expr, CSEVariable):
|
| 804 |
+
# If the expressions were always created with all the information, we could
|
| 805 |
+
# assert expr.bounds == bounds, but sometimes the expression is created
|
| 806 |
+
# with the loose ValueRanges.unknown(), so we need to tighten the bounds
|
| 807 |
+
expr.bounds = expr.bounds.tighten(bounds)
|
| 808 |
+
return expr
|
| 809 |
+
cache_key = expr
|
| 810 |
+
var = self.cache.get(cache_key, None)
|
| 811 |
+
if not var:
|
| 812 |
+
var = self.newvar(bounds) if assignment else None
|
| 813 |
+
self.cache[cache_key] = var
|
| 814 |
+
if write:
|
| 815 |
+
if V.kernel.current_node:
|
| 816 |
+
V.kernel.current_node.codegen_originating_info(
|
| 817 |
+
buffer, only_once=True
|
| 818 |
+
)
|
| 819 |
+
if assignment:
|
| 820 |
+
line = f"{self.prefix}{var} = {expr}{self.suffix}"
|
| 821 |
+
else:
|
| 822 |
+
line = f"{expr}{self.suffix}"
|
| 823 |
+
buffer.writeline(line)
|
| 824 |
+
else:
|
| 825 |
+
var.bounds = var.bounds.tighten(bounds)
|
| 826 |
+
|
| 827 |
+
return var
|
| 828 |
+
|
| 829 |
+
def newvar(self, bounds: ValueRanges = ValueRanges.unknown()) -> CSEVariable:
|
| 830 |
+
var_name = f"{self.name_prefix}{next(self.iter_buffer_ids)}"
|
| 831 |
+
var = V.kernel.create_cse_var(var_name, bounds)
|
| 832 |
+
self.varname_map[var_name] = var
|
| 833 |
+
return var
|
| 834 |
+
|
| 835 |
+
|
| 836 |
+
class IndirectAssertLine(DeferredLineBase):
|
| 837 |
+
def __init__(self, line, assert_fn, var, mask, size_map):
|
| 838 |
+
self.var = var
|
| 839 |
+
self.mask = mask
|
| 840 |
+
self.line = line
|
| 841 |
+
self.assert_fn = assert_fn
|
| 842 |
+
self.size_map = size_map
|
| 843 |
+
|
| 844 |
+
def __call__(self):
|
| 845 |
+
size, size_str = self.size_map[(self.var, self.mask)]
|
| 846 |
+
|
| 847 |
+
# We assert if we've not been able to prove the bound
|
| 848 |
+
assert_min = (self.var.bounds.lower >= 0) != sympy.true
|
| 849 |
+
assert_max = (self.var.bounds.upper < size) != sympy.true
|
| 850 |
+
|
| 851 |
+
# FooBar interview question
|
| 852 |
+
if not (assert_min or assert_max):
|
| 853 |
+
return None
|
| 854 |
+
elif assert_min and assert_max:
|
| 855 |
+
# The conditions need to be in parens because of Python's operator precedence.
|
| 856 |
+
# It'd be less error-prone to use and/or/not, which is suported by triton
|
| 857 |
+
cond = f"(0 <= {self.var}) & ({self.var} < {size_str})"
|
| 858 |
+
cond_print = f"0 <= {self.var} < {size_str}"
|
| 859 |
+
elif assert_min:
|
| 860 |
+
cond = f"0 <= {self.var}"
|
| 861 |
+
cond_print = cond
|
| 862 |
+
else:
|
| 863 |
+
assert assert_max
|
| 864 |
+
cond = f"{self.var} < {size_str}"
|
| 865 |
+
cond_print = cond
|
| 866 |
+
|
| 867 |
+
if self.mask:
|
| 868 |
+
cond = f"({cond}) | ~{self.mask}"
|
| 869 |
+
return self.line.format(
|
| 870 |
+
assert_fn=self.assert_fn, cond=cond, cond_print=cond_print
|
| 871 |
+
)
|
| 872 |
+
|
| 873 |
+
def _new_line(self, line):
|
| 874 |
+
return IndirectAssertLine(
|
| 875 |
+
line, self.assert_fn, self.var, self.mask, self.size_map
|
| 876 |
+
)
|
| 877 |
+
|
| 878 |
+
|
| 879 |
+
class CodeGen:
|
| 880 |
+
def __init__(self):
|
| 881 |
+
super().__init__()
|
| 882 |
+
self.exit_stack = contextlib.ExitStack()
|
| 883 |
+
|
| 884 |
+
def __enter__(self):
|
| 885 |
+
self.exit_stack.__enter__()
|
| 886 |
+
return self
|
| 887 |
+
|
| 888 |
+
def __exit__(self, exc_type, exc_val, exc_tb):
|
| 889 |
+
self.exit_stack.__exit__(exc_type, exc_val, exc_tb)
|
| 890 |
+
|
| 891 |
+
|
| 892 |
+
class Kernel(CodeGen):
|
| 893 |
+
newvar_prefix = ""
|
| 894 |
+
suffix = ""
|
| 895 |
+
overrides = None
|
| 896 |
+
load_format = None
|
| 897 |
+
store_format = None
|
| 898 |
+
|
| 899 |
+
def __init__(self, args=None, increase_kernel_count=True):
|
| 900 |
+
super().__init__()
|
| 901 |
+
if increase_kernel_count:
|
| 902 |
+
metrics.generated_kernel_count += 1
|
| 903 |
+
self.args = args or KernelArgs()
|
| 904 |
+
self.loads = IndentedBuffer()
|
| 905 |
+
self.compute = IndentedBuffer()
|
| 906 |
+
self.stores = IndentedBuffer()
|
| 907 |
+
self.cse: CSE = CSE(self.newvar_prefix, self.suffix)
|
| 908 |
+
self.must_keep_buffers = set()
|
| 909 |
+
self.store_buffer_names = set()
|
| 910 |
+
self._load_mask = None
|
| 911 |
+
# set in set_current_node
|
| 912 |
+
self.current_node = None
|
| 913 |
+
self.node_to_bounds: Optional[Dict[torch.fx.Node, ValueRanges]] = None
|
| 914 |
+
# Upper bounds for indirect_indexing and their str representation
|
| 915 |
+
self.indirect_max_sizes: Dict[Tuple[str, str], Tuple[sympy.Expr, str]] = {}
|
| 916 |
+
|
| 917 |
+
self.removed_buffers = set()
|
| 918 |
+
self.inplaced_to_remove = set()
|
| 919 |
+
|
| 920 |
+
# key: the buffer to write
|
| 921 |
+
# value: the buffer to read and whose memory can be reused for
|
| 922 |
+
# the buffer specified by key
|
| 923 |
+
self.inplace_update_buffers = dict()
|
| 924 |
+
# Set minimum number of elements processed per thread.
|
| 925 |
+
self.min_elem_per_thread = 1
|
| 926 |
+
|
| 927 |
+
@contextlib.contextmanager
|
| 928 |
+
def set_current_node(self, node):
|
| 929 |
+
prior = self.current_node
|
| 930 |
+
self.current_node = node
|
| 931 |
+
self.node_to_bounds = node._body.bounds().get_bounds()
|
| 932 |
+
try:
|
| 933 |
+
yield
|
| 934 |
+
finally:
|
| 935 |
+
self.current_node = prior
|
| 936 |
+
|
| 937 |
+
@contextlib.contextmanager
|
| 938 |
+
def swap_buffers(self, lb, cb=None, sb=None):
|
| 939 |
+
if cb is None:
|
| 940 |
+
cb = lb
|
| 941 |
+
loads = self.loads
|
| 942 |
+
compute = self.compute
|
| 943 |
+
stores = self.stores
|
| 944 |
+
cse = self.cse
|
| 945 |
+
self.loads = lb
|
| 946 |
+
self.compute = cb
|
| 947 |
+
self.stores = sb
|
| 948 |
+
self.cse = cse.clone()
|
| 949 |
+
try:
|
| 950 |
+
yield
|
| 951 |
+
finally:
|
| 952 |
+
self.loads = loads
|
| 953 |
+
self.compute = compute
|
| 954 |
+
self.stores = stores
|
| 955 |
+
self.cse = cse
|
| 956 |
+
|
| 957 |
+
def load(self, name: str, index: sympy.Expr):
|
| 958 |
+
raise NotImplementedError()
|
| 959 |
+
|
| 960 |
+
def indirect_load(self, name: str, index: sympy.Expr):
|
| 961 |
+
"""A load the depends on an index we have read"""
|
| 962 |
+
prior = self.loads
|
| 963 |
+
try:
|
| 964 |
+
# put the load in the compute section as it might have deps
|
| 965 |
+
self.loads = self.compute
|
| 966 |
+
return self.load(name, index)
|
| 967 |
+
finally:
|
| 968 |
+
self.loads = prior
|
| 969 |
+
|
| 970 |
+
def store_reduction(self, name, index, value):
|
| 971 |
+
raise NotImplementedError()
|
| 972 |
+
|
| 973 |
+
def store(self, name, index, value, mode=None):
|
| 974 |
+
raise NotImplementedError()
|
| 975 |
+
|
| 976 |
+
def reduction(self, dtype, src_dtype, reduction_type, value):
|
| 977 |
+
raise NotImplementedError()
|
| 978 |
+
|
| 979 |
+
def bucketize(
|
| 980 |
+
self,
|
| 981 |
+
values,
|
| 982 |
+
offsets_name: str,
|
| 983 |
+
offsets_size: sympy.Expr,
|
| 984 |
+
indexing_dtype: torch.dtype,
|
| 985 |
+
right: bool,
|
| 986 |
+
):
|
| 987 |
+
"""
|
| 988 |
+
See [Note: Inductor bucketize op]
|
| 989 |
+
"""
|
| 990 |
+
raise NotImplementedError()
|
| 991 |
+
|
| 992 |
+
@property
|
| 993 |
+
def assert_function(self) -> str:
|
| 994 |
+
raise NotImplementedError()
|
| 995 |
+
|
| 996 |
+
def index_to_str(self, index: sympy.Expr) -> str:
|
| 997 |
+
raise NotImplementedError()
|
| 998 |
+
|
| 999 |
+
def __enter__(self):
|
| 1000 |
+
class CSEProxy:
|
| 1001 |
+
self.name = "CSEProxy"
|
| 1002 |
+
|
| 1003 |
+
@staticmethod
|
| 1004 |
+
def __getattr__(name: str) -> Callable[..., CSEVariable]: # type: ignore[misc]
|
| 1005 |
+
def inner(*args, **kwargs):
|
| 1006 |
+
# TritonTemplateKernel has no current_node
|
| 1007 |
+
buf_bounds = ValueRanges.unknown()
|
| 1008 |
+
if hasattr(V.interpreter, "current_node"):
|
| 1009 |
+
fx_node = V.interpreter.current_node
|
| 1010 |
+
assert isinstance(self.node_to_bounds, dict)
|
| 1011 |
+
buf_bounds = self.node_to_bounds.get(
|
| 1012 |
+
fx_node, ValueRanges.unknown()
|
| 1013 |
+
)
|
| 1014 |
+
|
| 1015 |
+
csevar = self.cse.generate(
|
| 1016 |
+
self.compute,
|
| 1017 |
+
getattr(parent_handler, name)(*args, **kwargs), # type: ignore[has-type]
|
| 1018 |
+
bounds=buf_bounds,
|
| 1019 |
+
)
|
| 1020 |
+
csevar.update_on_args(name, args, kwargs)
|
| 1021 |
+
return csevar
|
| 1022 |
+
|
| 1023 |
+
return inner
|
| 1024 |
+
|
| 1025 |
+
@staticmethod
|
| 1026 |
+
def indirect_indexing(var, size, check=True):
|
| 1027 |
+
# Skip CSE since this doesn't return an expression
|
| 1028 |
+
|
| 1029 |
+
if var.bounds.lower < 0:
|
| 1030 |
+
new_bounds = ValueRanges.unknown()
|
| 1031 |
+
if var.bounds != ValueRanges.unknown() and isinstance(
|
| 1032 |
+
size, sympy.Number
|
| 1033 |
+
):
|
| 1034 |
+
# Take the negative part of the bound and add size to it
|
| 1035 |
+
# Then take union of that and the positive part
|
| 1036 |
+
# This is a tighter bound than that of a generic ops.where, as we have info on the cond
|
| 1037 |
+
neg = var.bounds & ValueRanges(-sympy.oo, -1)
|
| 1038 |
+
new_bounds = ValueRanges(neg.lower + size, neg.upper + size)
|
| 1039 |
+
# We don't have a good way of representing the empty range
|
| 1040 |
+
if var.bounds.upper >= 0:
|
| 1041 |
+
pos = var.bounds & ValueRanges(0, sympy.oo)
|
| 1042 |
+
new_bounds = new_bounds | pos
|
| 1043 |
+
|
| 1044 |
+
stm = ops.add(var, self.rename_indexing(size))
|
| 1045 |
+
# Mixed negative and non-negative
|
| 1046 |
+
if var.bounds.upper >= 0:
|
| 1047 |
+
lt = ops.lt(var, "0")
|
| 1048 |
+
stm = ops.where(lt, stm, var)
|
| 1049 |
+
new_var = self.cse.generate(self.compute, stm, bounds=new_bounds)
|
| 1050 |
+
|
| 1051 |
+
new_var.update_on_args("index_wrap", (var,), {})
|
| 1052 |
+
var = new_var
|
| 1053 |
+
|
| 1054 |
+
if self.generate_assert(check):
|
| 1055 |
+
mask = self.load_mask(var)
|
| 1056 |
+
|
| 1057 |
+
# An assertion line may have been written already, if so just
|
| 1058 |
+
# update the max size.
|
| 1059 |
+
map_key = (var, mask)
|
| 1060 |
+
existing_size, _ = self.indirect_max_sizes.get(
|
| 1061 |
+
map_key, (None, None)
|
| 1062 |
+
)
|
| 1063 |
+
if existing_size is not None:
|
| 1064 |
+
size = sympy.Min(size, existing_size)
|
| 1065 |
+
else:
|
| 1066 |
+
line = (
|
| 1067 |
+
'{assert_fn}({cond}, "index out of bounds: {cond_print}")'
|
| 1068 |
+
)
|
| 1069 |
+
self.compute.writeline(
|
| 1070 |
+
IndirectAssertLine(
|
| 1071 |
+
line,
|
| 1072 |
+
self.assert_function,
|
| 1073 |
+
var,
|
| 1074 |
+
mask,
|
| 1075 |
+
self.indirect_max_sizes,
|
| 1076 |
+
)
|
| 1077 |
+
)
|
| 1078 |
+
|
| 1079 |
+
self.indirect_max_sizes[map_key] = (size, self.index_to_str(size))
|
| 1080 |
+
return sympy_symbol(str(var))
|
| 1081 |
+
|
| 1082 |
+
@staticmethod
|
| 1083 |
+
def load(name: str, index: sympy.Expr):
|
| 1084 |
+
if name in self.cse.invalidated_stores:
|
| 1085 |
+
# A load from an invalidated store requires us to
|
| 1086 |
+
# keep the actual buffer around
|
| 1087 |
+
V.kernel.must_keep_buffers.add(name)
|
| 1088 |
+
if free_symbol_startswith(index, "tmp"):
|
| 1089 |
+
return self.indirect_load(name, index)
|
| 1090 |
+
store_cache = self.cse.store_cache
|
| 1091 |
+
if name in store_cache:
|
| 1092 |
+
return store_cache[name]
|
| 1093 |
+
return self.load(name, index)
|
| 1094 |
+
|
| 1095 |
+
@staticmethod
|
| 1096 |
+
def store(name, index, value, mode=None):
|
| 1097 |
+
self.store_buffer_names.add(name)
|
| 1098 |
+
if mode is None:
|
| 1099 |
+
self.cse.store_cache[name] = value
|
| 1100 |
+
if self.current_node:
|
| 1101 |
+
for other_name in self.current_node.get_mutations():
|
| 1102 |
+
self.cse.store_cache[other_name] = value
|
| 1103 |
+
if name not in V.graph.removed_buffers:
|
| 1104 |
+
return self.store(name, index, value, mode=mode)
|
| 1105 |
+
|
| 1106 |
+
@staticmethod
|
| 1107 |
+
def store_reduction(name, index, value):
|
| 1108 |
+
self.store_buffer_names.add(name)
|
| 1109 |
+
self.cse.store_cache[name] = value
|
| 1110 |
+
if self.current_node:
|
| 1111 |
+
for other_name in self.current_node.get_mutations():
|
| 1112 |
+
self.cse.store_cache[other_name] = value
|
| 1113 |
+
|
| 1114 |
+
if name not in V.graph.removed_buffers:
|
| 1115 |
+
return self.store_reduction(name, index, value)
|
| 1116 |
+
|
| 1117 |
+
@staticmethod
|
| 1118 |
+
def reduction(dtype, src_dtype, reduction_type, value):
|
| 1119 |
+
return self.reduction(dtype, src_dtype, reduction_type, value)
|
| 1120 |
+
|
| 1121 |
+
@staticmethod
|
| 1122 |
+
def bucketize(
|
| 1123 |
+
values,
|
| 1124 |
+
offsets_name: str,
|
| 1125 |
+
offsets_size: sympy.Expr,
|
| 1126 |
+
indexing_dtype: torch.dtype,
|
| 1127 |
+
right: bool,
|
| 1128 |
+
):
|
| 1129 |
+
"""
|
| 1130 |
+
[Note: Inductor bucketize op]
|
| 1131 |
+
|
| 1132 |
+
Given values (tensor) and offsets_name (reference to the name of a 1D
|
| 1133 |
+
tensor), calculate the bucket that each value belongs to.
|
| 1134 |
+
|
| 1135 |
+
e.g. for values [-1, 0, 1, 2, 3, 4, 5, 9], offsets [0, 4, 4, 8], right=True
|
| 1136 |
+
return = [ 0, 1, 1, 1, 1, 3, 3, 4].
|
| 1137 |
+
|
| 1138 |
+
When right == False, bucket i refers to range (offsets[i], offsets[i+1]].
|
| 1139 |
+
When right == True, bucket i refers to range [offsets[i], offsets[i+1]).
|
| 1140 |
+
|
| 1141 |
+
Offsets must be non-decreasing or the result is undefined.
|
| 1142 |
+
"""
|
| 1143 |
+
return self.bucketize(
|
| 1144 |
+
values, offsets_name, offsets_size, indexing_dtype, right
|
| 1145 |
+
)
|
| 1146 |
+
|
| 1147 |
+
super().__enter__()
|
| 1148 |
+
assert self.overrides
|
| 1149 |
+
parent_handler = self.overrides(V.get_ops_handler())
|
| 1150 |
+
self.exit_stack.enter_context(V.set_ops_handler(CSEProxy()))
|
| 1151 |
+
self.exit_stack.enter_context(V.set_kernel_handler(self))
|
| 1152 |
+
return self
|
| 1153 |
+
|
| 1154 |
+
def __exit__(self, exc_type, exc_val, exc_tb):
|
| 1155 |
+
"""
|
| 1156 |
+
Note that V.graph.scheduler can be None when codegening triton template
|
| 1157 |
+
kernels.
|
| 1158 |
+
"""
|
| 1159 |
+
if V.graph.scheduler:
|
| 1160 |
+
V.graph.scheduler.remove_kernel_local_buffers()
|
| 1161 |
+
super().__exit__(exc_type, exc_val, exc_tb)
|
| 1162 |
+
|
| 1163 |
+
def generate_assert(self, check):
|
| 1164 |
+
return (check or config.debug_index_asserts) and config.assert_indirect_indexing
|
| 1165 |
+
|
| 1166 |
+
def load_mask(self, var):
|
| 1167 |
+
# only the triton kernel requires mask
|
| 1168 |
+
return ""
|
| 1169 |
+
|
| 1170 |
+
def rename_indexing(self, index) -> sympy.Expr:
|
| 1171 |
+
# adds the necessary kernel args for index expressions
|
| 1172 |
+
# and renames variables in index expressions to kernel arg names
|
| 1173 |
+
if isinstance(index, (list, tuple)):
|
| 1174 |
+
return [self.rename_indexing(x) for x in index]
|
| 1175 |
+
index = V.graph.sizevars.simplify(index)
|
| 1176 |
+
sorted_symbols = sorted(index.free_symbols, key=lambda s: s.name)
|
| 1177 |
+
replacements = {
|
| 1178 |
+
x: self.args.size(x)
|
| 1179 |
+
for x in sorted_symbols
|
| 1180 |
+
if x.name.startswith("s")
|
| 1181 |
+
or x.name.startswith("ps")
|
| 1182 |
+
or (x.name.startswith("i") and not x.name.startswith("idx"))
|
| 1183 |
+
}
|
| 1184 |
+
return sympy_subs(index, replacements)
|
| 1185 |
+
|
| 1186 |
+
def create_cse_var(self, *args, **kwargs):
|
| 1187 |
+
return CSEVariable(*args, **kwargs)
|
| 1188 |
+
|
| 1189 |
+
|
| 1190 |
+
@dataclasses.dataclass
|
| 1191 |
+
class OptimizationContext:
|
| 1192 |
+
key: ClassVar[str] = "opt_ctx"
|
| 1193 |
+
|
| 1194 |
+
# Load value as mask
|
| 1195 |
+
is_load_as_mask: bool = False
|
| 1196 |
+
|
| 1197 |
+
dtype: Optional[torch.dtype] = None
|
| 1198 |
+
ops_name: str = ""
|
| 1199 |
+
is_most_inner_loop_irrevelant: bool = False
|
| 1200 |
+
|
| 1201 |
+
# Load uint8 value as float32
|
| 1202 |
+
is_load_uint8_as_float: bool = False
|
| 1203 |
+
|
| 1204 |
+
|
| 1205 |
+
@functools.lru_cache(None)
|
| 1206 |
+
def jinja2_env():
|
| 1207 |
+
try:
|
| 1208 |
+
import jinja2
|
| 1209 |
+
|
| 1210 |
+
return jinja2.Environment(
|
| 1211 |
+
undefined=jinja2.StrictUndefined,
|
| 1212 |
+
)
|
| 1213 |
+
except ImportError:
|
| 1214 |
+
return None
|
| 1215 |
+
|
| 1216 |
+
|
| 1217 |
+
class ChoiceCaller:
|
| 1218 |
+
"""
|
| 1219 |
+
Represents a possible choice used in autotune_process.py.
|
| 1220 |
+
During autotuning, self.benchmark() is first called to get benchmark result,
|
| 1221 |
+
and if this choice is selected, self.output_node() is called to get the output_node.
|
| 1222 |
+
|
| 1223 |
+
Children classes: TritonTemplateCaller, CUDATemplateCaller.
|
| 1224 |
+
"""
|
| 1225 |
+
|
| 1226 |
+
def __init__(self, name, input_nodes, layout):
|
| 1227 |
+
super().__init__()
|
| 1228 |
+
self.name = name
|
| 1229 |
+
self.layout = layout
|
| 1230 |
+
self.input_nodes = input_nodes
|
| 1231 |
+
|
| 1232 |
+
def benchmark(self, *args, out) -> float:
|
| 1233 |
+
algo = self.to_callable()
|
| 1234 |
+
return do_bench(lambda: algo(*args, out=out))
|
| 1235 |
+
|
| 1236 |
+
def call_name(self) -> str:
|
| 1237 |
+
raise NotImplementedError()
|
| 1238 |
+
|
| 1239 |
+
def to_callable(self):
|
| 1240 |
+
raise NotImplementedError()
|
| 1241 |
+
|
| 1242 |
+
def hash_key(self) -> str:
|
| 1243 |
+
raise NotImplementedError()
|
| 1244 |
+
|
| 1245 |
+
def output_node(self) -> "TensorBox": # type: ignore[name-defined]
|
| 1246 |
+
raise NotImplementedError()
|
| 1247 |
+
|
| 1248 |
+
|
| 1249 |
+
class KernelTemplate:
|
| 1250 |
+
"""
|
| 1251 |
+
Base class for defining kernel templates.
|
| 1252 |
+
|
| 1253 |
+
Children classes: TritonTemplate, CUDATemplate
|
| 1254 |
+
"""
|
| 1255 |
+
|
| 1256 |
+
@staticmethod
|
| 1257 |
+
def _template_from_string(source):
|
| 1258 |
+
env = jinja2_env()
|
| 1259 |
+
if env is not None:
|
| 1260 |
+
return env.from_string(source)
|
| 1261 |
+
return None
|
| 1262 |
+
|
| 1263 |
+
@staticmethod
|
| 1264 |
+
def _fake_get_dtype(fake_out):
|
| 1265 |
+
_get_dtype_real = V.graph.get_dtype
|
| 1266 |
+
|
| 1267 |
+
def get_dtype(name):
|
| 1268 |
+
if name == fake_out.get_name():
|
| 1269 |
+
return fake_out.get_dtype()
|
| 1270 |
+
return _get_dtype_real(name)
|
| 1271 |
+
|
| 1272 |
+
return get_dtype
|
| 1273 |
+
|
| 1274 |
+
def __init__(self, name: str):
|
| 1275 |
+
self.name = name
|
| 1276 |
+
|
| 1277 |
+
def maybe_append_choice(self, choices, **kwargs):
|
| 1278 |
+
"""
|
| 1279 |
+
Maybe generates a new ChoiceCaller and appends it into existing choices.
|
| 1280 |
+
|
| 1281 |
+
choices: A list of ChoiceCallers.
|
| 1282 |
+
kwargs: Additional kwargs to be passed to self.generate() to generate a new ChoiceCaller.
|
| 1283 |
+
"""
|
| 1284 |
+
|
| 1285 |
+
try:
|
| 1286 |
+
choices.append(self.generate(**kwargs))
|
| 1287 |
+
except NotImplementedError:
|
| 1288 |
+
pass
|
| 1289 |
+
|
| 1290 |
+
def generate(self, **kwargs) -> ChoiceCaller:
|
| 1291 |
+
"""
|
| 1292 |
+
Generates a ChoiceCaller instance from the given arguments.
|
| 1293 |
+
"""
|
| 1294 |
+
|
| 1295 |
+
raise NotImplementedError()
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cpp.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cpp_prefix.h
ADDED
|
@@ -0,0 +1,410 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <algorithm>
|
| 4 |
+
#include <atomic>
|
| 5 |
+
#include <cmath>
|
| 6 |
+
#include <cstdlib>
|
| 7 |
+
#include <limits>
|
| 8 |
+
#include <omp.h>
|
| 9 |
+
|
| 10 |
+
#include <ATen/NumericUtils.h>
|
| 11 |
+
#include <ATen/core/PhiloxRNGEngine.h>
|
| 12 |
+
#include <ATen/native/Math.h>
|
| 13 |
+
|
| 14 |
+
#include <c10/util/BFloat16.h>
|
| 15 |
+
#include <c10/util/BFloat16-math.h>
|
| 16 |
+
#include <c10/util/generic_math.h>
|
| 17 |
+
#include <c10/util/Half.h>
|
| 18 |
+
#include <c10/util/TypeCast.h>
|
| 19 |
+
|
| 20 |
+
#if defined(CPU_CAPABILITY_AVX512) || defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_ZVECTOR)
|
| 21 |
+
#define INDUCTOR_USE_VECTOR_TYPES() 1
|
| 22 |
+
#else
|
| 23 |
+
#define INDUCTOR_USE_VECTOR_TYPES() 0
|
| 24 |
+
#endif
|
| 25 |
+
|
| 26 |
+
#if INDUCTOR_USE_VECTOR_TYPES()
|
| 27 |
+
#include <ATen/cpu/vec/functional.h>
|
| 28 |
+
#include <ATen/cpu/vec/vec.h>
|
| 29 |
+
#endif
|
| 30 |
+
|
| 31 |
+
typedef at::Half half;
|
| 32 |
+
typedef at::BFloat16 bfloat16;
|
| 33 |
+
|
| 34 |
+
template <typename T>
|
| 35 |
+
struct Welford {
|
| 36 |
+
T mean = T(0);
|
| 37 |
+
T m2 = T(0);
|
| 38 |
+
T weight = T(0);
|
| 39 |
+
};
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
template <typename T>
|
| 43 |
+
struct IsVecType: std::false_type {};
|
| 44 |
+
|
| 45 |
+
#if INDUCTOR_USE_VECTOR_TYPES()
|
| 46 |
+
template <typename T>
|
| 47 |
+
struct IsVecType<at::vec::Vectorized<T>>: std::true_type {};
|
| 48 |
+
#endif
|
| 49 |
+
|
| 50 |
+
template <typename T>
|
| 51 |
+
Welford<T> welford_combine(const Welford<T> &a, const Welford<T> &b) {
|
| 52 |
+
if constexpr (!IsVecType<T>::value) {
|
| 53 |
+
if (a.weight == 0) {
|
| 54 |
+
return b;
|
| 55 |
+
}
|
| 56 |
+
if (b.weight == 0) {
|
| 57 |
+
return a;
|
| 58 |
+
}
|
| 59 |
+
}
|
| 60 |
+
auto delta = b.mean - a.mean;
|
| 61 |
+
auto new_weight = a.weight + b.weight;
|
| 62 |
+
auto wb_over_w = b.weight / new_weight;
|
| 63 |
+
if constexpr (IsVecType<T>::value) {
|
| 64 |
+
// Guard against division by zero
|
| 65 |
+
wb_over_w = T::blendv(wb_over_w, T(0), new_weight == T(0));
|
| 66 |
+
}
|
| 67 |
+
auto result = Welford<T>{
|
| 68 |
+
a.mean + delta * wb_over_w,
|
| 69 |
+
a.m2 + b.m2 + delta * delta * a.weight * wb_over_w,
|
| 70 |
+
new_weight
|
| 71 |
+
};
|
| 72 |
+
return result;
|
| 73 |
+
}
|
| 74 |
+
|
| 75 |
+
template <typename T>
|
| 76 |
+
Welford<T> welford_combine(const Welford<T> &acc, T data) {
|
| 77 |
+
// Add a single data point
|
| 78 |
+
auto delta = data - acc.mean;
|
| 79 |
+
auto new_weight = acc.weight + T(1);
|
| 80 |
+
auto new_mean = acc.mean + delta / new_weight;
|
| 81 |
+
auto new_delta = data - new_mean;
|
| 82 |
+
auto result = Welford<T>{
|
| 83 |
+
new_mean,
|
| 84 |
+
acc.m2 + delta * new_delta,
|
| 85 |
+
new_weight
|
| 86 |
+
};
|
| 87 |
+
return result;
|
| 88 |
+
}
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
#if INDUCTOR_USE_VECTOR_TYPES()
|
| 92 |
+
template <typename scalar_t>
|
| 93 |
+
inline at::vec::Vectorized<scalar_t> vec_shuffle_down(at::vec::Vectorized<scalar_t> x, size_t n) {
|
| 94 |
+
using Vec = at::vec::Vectorized<scalar_t>;
|
| 95 |
+
alignas(alignof(Vec)) scalar_t array[Vec::size()];
|
| 96 |
+
x.store(array);
|
| 97 |
+
for (size_t i = 0; i + n < Vec::size(); i += 2 * n) {
|
| 98 |
+
array[i] = array[i + n];
|
| 99 |
+
}
|
| 100 |
+
return Vec::loadu(array);
|
| 101 |
+
}
|
| 102 |
+
|
| 103 |
+
#ifdef CPU_CAPABILITY_AVX2
|
| 104 |
+
inline at::vec::Vectorized<float> vec_shuffle_down(at::vec::Vectorized<float> x, size_t n) {
|
| 105 |
+
using vec_t = at::vec::Vectorized<float>;
|
| 106 |
+
#define SHUFFLE_MASK(z, y, x, w) ((z << 6) | (y << 4) | (x << 2) | w)
|
| 107 |
+
switch (n) {
|
| 108 |
+
case 1:
|
| 109 |
+
return vec_t(_mm256_permute_ps(x, SHUFFLE_MASK(1, 1, 3, 3)));
|
| 110 |
+
case 2:
|
| 111 |
+
return vec_t(_mm256_permute_ps(x, SHUFFLE_MASK(2, 2, 2, 2)));
|
| 112 |
+
case 4:
|
| 113 |
+
return vec_t(_mm256_permute2f128_ps(x, x, SHUFFLE_MASK(1, 1, 1, 1)));
|
| 114 |
+
}
|
| 115 |
+
TORCH_CHECK(false, "Unhandled vec_shuffle_down value ", n);
|
| 116 |
+
}
|
| 117 |
+
#endif
|
| 118 |
+
|
| 119 |
+
template <typename scalar_t>
|
| 120 |
+
Welford<scalar_t> welford_vec_reduce_all(Welford<at::vec::Vectorized<scalar_t>> acc) {
|
| 121 |
+
using Vec = at::vec::Vectorized<scalar_t>;
|
| 122 |
+
for (size_t n = 1; n < Vec::size(); n *= 2) {
|
| 123 |
+
auto shuffled = Welford<Vec>{
|
| 124 |
+
vec_shuffle_down(acc.mean, n),
|
| 125 |
+
vec_shuffle_down(acc.m2, n),
|
| 126 |
+
vec_shuffle_down(acc.weight, n)
|
| 127 |
+
};
|
| 128 |
+
acc = welford_combine(acc, shuffled);
|
| 129 |
+
}
|
| 130 |
+
|
| 131 |
+
Welford<scalar_t> result;
|
| 132 |
+
alignas(alignof(Vec)) scalar_t array[Vec::size()];
|
| 133 |
+
acc.mean.store(array);
|
| 134 |
+
result.mean = array[0];
|
| 135 |
+
|
| 136 |
+
acc.m2.store(array);
|
| 137 |
+
result.m2 = array[0];
|
| 138 |
+
|
| 139 |
+
acc.weight.store(array);
|
| 140 |
+
result.weight = array[0];
|
| 141 |
+
|
| 142 |
+
return result;
|
| 143 |
+
}
|
| 144 |
+
#endif
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
template <typename T> inline T mod(T a, T b) { return a % b; }
|
| 148 |
+
template <> inline float mod(float a, float b) { return std::fmod(a, b); }
|
| 149 |
+
template <> inline double mod(double a, double b) { return std::fmod(a, b); }
|
| 150 |
+
|
| 151 |
+
template <typename scalar_t>
|
| 152 |
+
inline scalar_t max_propagate_nan(scalar_t a, scalar_t b) {
|
| 153 |
+
if (at::_isnan(a)) {
|
| 154 |
+
return a;
|
| 155 |
+
}
|
| 156 |
+
return a > b ? a : b;
|
| 157 |
+
}
|
| 158 |
+
|
| 159 |
+
template <typename scalar_t>
|
| 160 |
+
inline scalar_t min_propagate_nan(scalar_t a, scalar_t b) {
|
| 161 |
+
if (at::_isnan(a)) {
|
| 162 |
+
return a;
|
| 163 |
+
}
|
| 164 |
+
return a < b ? a : b;
|
| 165 |
+
}
|
| 166 |
+
|
| 167 |
+
constexpr float uint32_to_uniform_float(uint32_t value) {
|
| 168 |
+
// maximum value such that `MAX_INT * scale < 1.0` (with float rounding)
|
| 169 |
+
constexpr float scale = 4.6566127342e-10;
|
| 170 |
+
return static_cast<float>(value & 0x7FFFFFFF) * scale;
|
| 171 |
+
}
|
| 172 |
+
|
| 173 |
+
float normalized_rand_cpu(uint32_t seed, uint32_t offset) {
|
| 174 |
+
return uint32_to_uniform_float(at::Philox4_32(seed, 0, offset)());
|
| 175 |
+
}
|
| 176 |
+
|
| 177 |
+
float randn_cpu(uint32_t seed, uint32_t offset) {
|
| 178 |
+
at::Philox4_32 engine(seed, 0, offset);
|
| 179 |
+
return engine.randn(10);
|
| 180 |
+
}
|
| 181 |
+
|
| 182 |
+
uint64_t randint64_cpu(uint32_t seed, uint32_t offset, int64_t low, int64_t high) {
|
| 183 |
+
auto gen = at::Philox4_32(seed, 0, offset);
|
| 184 |
+
uint64_t r0 = gen();
|
| 185 |
+
uint64_t r1 = gen();
|
| 186 |
+
uint64_t result = r0 | (r1 << 32);
|
| 187 |
+
return (result % static_cast<uint64_t>(high - low)) + low;
|
| 188 |
+
}
|
| 189 |
+
|
| 190 |
+
template <typename T> struct AsIntegerType { typedef T type; };
|
| 191 |
+
template <> struct AsIntegerType<float> { typedef uint32_t type; };
|
| 192 |
+
template <> struct AsIntegerType<double> { typedef uint64_t type; };
|
| 193 |
+
template <> struct AsIntegerType<bfloat16> { typedef uint16_t type; };
|
| 194 |
+
|
| 195 |
+
template <typename T>
|
| 196 |
+
typename std::enable_if<!std::is_reduced_floating_point<T>::value, T>::type
|
| 197 |
+
inline fetch_value(volatile T *addr) {
|
| 198 |
+
return *addr;
|
| 199 |
+
}
|
| 200 |
+
|
| 201 |
+
template <typename T>
|
| 202 |
+
typename std::enable_if<std::is_reduced_floating_point<T>::value, T>::type
|
| 203 |
+
inline fetch_value(volatile T *addr) {
|
| 204 |
+
return T(addr->x, T::from_bits());
|
| 205 |
+
}
|
| 206 |
+
|
| 207 |
+
template <typename T>
|
| 208 |
+
typename std::enable_if<!std::is_integral<T>::value>::type
|
| 209 |
+
atomic_add(volatile T *addr, T offset) {
|
| 210 |
+
typedef typename AsIntegerType<T>::type alt_type;
|
| 211 |
+
|
| 212 |
+
static_assert(sizeof(std::atomic<alt_type>) == sizeof(T),
|
| 213 |
+
"std::atomic issue");
|
| 214 |
+
|
| 215 |
+
alt_type expected;
|
| 216 |
+
|
| 217 |
+
alt_type desired;
|
| 218 |
+
|
| 219 |
+
std::atomic<alt_type> *atomic_addr = (std::atomic<alt_type> *)addr;
|
| 220 |
+
do {
|
| 221 |
+
T val = fetch_value(addr);
|
| 222 |
+
reinterpret_cast<T *>(&expected)[0] = val;
|
| 223 |
+
reinterpret_cast<T *>(&desired)[0] = val + offset;
|
| 224 |
+
} while (!atomic_addr->compare_exchange_weak(expected, desired,
|
| 225 |
+
std::memory_order_relaxed));
|
| 226 |
+
}
|
| 227 |
+
|
| 228 |
+
// Since C++20 float is supported by fetch_add, but the performance may not
|
| 229 |
+
// better than compare_exchange_weak, which can be checked by microbenchmark
|
| 230 |
+
// inductor_cpu_atomic.py
|
| 231 |
+
template <typename T>
|
| 232 |
+
typename std::enable_if<std::is_integral<T>::value>::type
|
| 233 |
+
atomic_add(volatile T *addr, T offset) {
|
| 234 |
+
static_assert(sizeof(std::atomic<T>) == sizeof(T),
|
| 235 |
+
"std::atomic issue");
|
| 236 |
+
std::atomic<T> *atomic_addr = (std::atomic<T> *)addr;
|
| 237 |
+
atomic_addr->fetch_add(offset, std::memory_order_relaxed);
|
| 238 |
+
}
|
| 239 |
+
|
| 240 |
+
// This function is used to convert bool or uint8 to float mask for
|
| 241 |
+
// vectorization. The caller needs to make sure the src represents TRUE/FALSE
|
| 242 |
+
// correctly.
|
| 243 |
+
template <typename T>
|
| 244 |
+
inline float flag_to_float_scalar(T src) {
|
| 245 |
+
float ret;
|
| 246 |
+
*(uint32_t*)(&ret) = src ? 0xFFFFFFFF : 0;
|
| 247 |
+
return ret;
|
| 248 |
+
}
|
| 249 |
+
|
| 250 |
+
#if defined(CPU_CAPABILITY_AVX512) || defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_ZVECTOR)
|
| 251 |
+
|
| 252 |
+
inline at::vec::Vectorized<float> masked_load(const float* src, at::vec::Vectorized<float> mask) {
|
| 253 |
+
# if defined(CPU_CAPABILITY_AVX512)
|
| 254 |
+
at::vec::Vectorized<float> zero_vec(0);
|
| 255 |
+
auto all_ones = _mm512_set1_epi32(0xFFFFFFFF);
|
| 256 |
+
auto mmask = _mm512_cmp_epi32_mask(_mm512_castps_si512(mask), all_ones, _MM_CMPINT_EQ);
|
| 257 |
+
return _mm512_mask_loadu_ps(zero_vec, mmask, src);
|
| 258 |
+
# elif defined(CPU_CAPABILITY_AVX2)
|
| 259 |
+
auto all_ones = _mm256_set1_epi32(0xFFFFFFFF);
|
| 260 |
+
auto mmask = _mm256_cmpeq_epi32(_mm256_castps_si256(mask), all_ones);
|
| 261 |
+
return _mm256_maskload_ps(src, mmask);
|
| 262 |
+
# elif defined(CPU_CAPABILITY_ZVECTOR)
|
| 263 |
+
auto result = at::vec::Vectorized<float>::loadu(src);
|
| 264 |
+
return (result & mask);
|
| 265 |
+
# else
|
| 266 |
+
# error Unsupported vectorization CPU capability
|
| 267 |
+
# endif
|
| 268 |
+
}
|
| 269 |
+
|
| 270 |
+
template <typename T>
|
| 271 |
+
typename std::enable_if<std::is_same<T, bfloat16>::value || std::is_same<T, half>::value, at::vec::Vectorized<T>>::type
|
| 272 |
+
inline masked_load(const T* src, at::vec::Vectorized<float> mask) {
|
| 273 |
+
# if defined(CPU_CAPABILITY_AVX512)
|
| 274 |
+
auto all_ones = _mm512_set1_epi32(0xFFFFFFFF);
|
| 275 |
+
auto mmask = _mm512_cmp_epi32_mask(_mm512_castps_si512(mask), all_ones, _MM_CMPINT_EQ);
|
| 276 |
+
auto zero = _mm256_set1_epi16(0);
|
| 277 |
+
auto temp = _mm256_mask_loadu_epi16(zero, mmask, src);
|
| 278 |
+
return _mm512_inserti32x8(_mm512_castsi256_si512(temp), zero, 1);
|
| 279 |
+
# elif defined(CPU_CAPABILITY_AVX2)
|
| 280 |
+
auto all_ones = _mm256_set1_epi32(0xFFFFFFFF);
|
| 281 |
+
auto mmask_vec = _mm256_cmpeq_epi32(_mm256_castps_si256(mask), all_ones);
|
| 282 |
+
__at_align__ uint32_t mmask[8];
|
| 283 |
+
_mm256_storeu_si256(reinterpret_cast<__m256i*>(mmask), mmask_vec);
|
| 284 |
+
__at_align__ uint16_t result[16];
|
| 285 |
+
for (auto i = 0; i < 8; i++) {
|
| 286 |
+
result[i] = mmask[i] == 0xFFFFFFFF ? src[i].x: uint16_t(0);
|
| 287 |
+
}
|
| 288 |
+
return at::vec::Vectorized<T>::loadu(result);
|
| 289 |
+
# elif defined(CPU_CAPABILITY_ZVECTOR)
|
| 290 |
+
auto result = at::vec::Vectorized<T>::loadu(src, 8);
|
| 291 |
+
uint32_t maskdata[8] = { 0 };
|
| 292 |
+
uint16_t maskdata_dest[16] = { 0 };
|
| 293 |
+
mask.store(maskdata);
|
| 294 |
+
for (auto i = 0; i < 8; i++) {
|
| 295 |
+
maskdata_dest[i] = (maskdata[i] == 0xFFFFFFFF) ? 0xFFFF: 0;
|
| 296 |
+
}
|
| 297 |
+
auto maskvector = at::vec::Vectorized<T>::loadu(maskdata_dest);
|
| 298 |
+
return (result & maskvector);
|
| 299 |
+
# else
|
| 300 |
+
# error Unsupported vectorization CPU capability
|
| 301 |
+
# endif
|
| 302 |
+
}
|
| 303 |
+
|
| 304 |
+
inline at::vec::Vectorized<uint8_t> masked_load(const uint8_t* src, at::vec::Vectorized<float> mask) {
|
| 305 |
+
# if defined(CPU_CAPABILITY_AVX512)
|
| 306 |
+
auto all_ones = _mm512_set1_epi32(0xFFFFFFFF);
|
| 307 |
+
auto mmask = _mm512_cmp_epi32_mask(_mm512_castps_si512(mask), all_ones, _MM_CMPINT_EQ);
|
| 308 |
+
auto zero = _mm_set1_epi8(0);
|
| 309 |
+
auto temp = _mm_mask_loadu_epi8(zero, mmask, src);
|
| 310 |
+
return _mm512_inserti64x2(_mm512_set1_epi32(0), temp, 0);
|
| 311 |
+
# elif defined(CPU_CAPABILITY_AVX2)
|
| 312 |
+
auto all_ones = _mm256_set1_epi32(0xFFFFFFFF);
|
| 313 |
+
auto mmask_vec = _mm256_cmpeq_epi32(_mm256_castps_si256(mask), all_ones);
|
| 314 |
+
__at_align__ uint32_t mmask[8];
|
| 315 |
+
_mm256_storeu_si256(reinterpret_cast<__m256i*>(mmask), mmask_vec);
|
| 316 |
+
__at_align__ uint8_t result[32];
|
| 317 |
+
for (auto i = 0; i < 8; i++) {
|
| 318 |
+
result[i] = mmask[i] == 0xFFFFFFFF ? src[i]: uint8_t(0);
|
| 319 |
+
}
|
| 320 |
+
return at::vec::Vectorized<uint8_t>::loadu(result);
|
| 321 |
+
# elif defined(CPU_CAPABILITY_ZVECTOR)
|
| 322 |
+
auto result = at::vec::Vectorized<uint8_t>::loadu(src, 8);
|
| 323 |
+
uint32_t maskdata[8];
|
| 324 |
+
uint8_t maskdata_dest[32] = { 0 };
|
| 325 |
+
mask.store(maskdata);
|
| 326 |
+
for (auto i = 0; i < 8; i++) {
|
| 327 |
+
maskdata_dest[i] = (maskdata[i] == 0xFFFFFFFF) ? 0xFF: 0;
|
| 328 |
+
}
|
| 329 |
+
auto maskvector = at::vec::Vectorized<uint8_t>::loadu(maskdata_dest);
|
| 330 |
+
return (result & maskvector);
|
| 331 |
+
# else
|
| 332 |
+
# error Unsupported vectorization CPU capability
|
| 333 |
+
# endif
|
| 334 |
+
}
|
| 335 |
+
|
| 336 |
+
template <typename T>
|
| 337 |
+
inline at::vec::Vectorized<float> flag_to_float_vec(const T* src) {
|
| 338 |
+
__at_align__ float dst_tmp[at::vec::Vectorized<float>::size()];
|
| 339 |
+
#pragma unroll
|
| 340 |
+
for (int64_t i = 0; i < at::vec::Vectorized<float>::size(); i++) {
|
| 341 |
+
dst_tmp[i] = flag_to_float_scalar(src[i]);
|
| 342 |
+
}
|
| 343 |
+
return at::vec::Vectorized<float>::loadu(dst_tmp);
|
| 344 |
+
}
|
| 345 |
+
|
| 346 |
+
template <typename scalar_t>
|
| 347 |
+
inline at::vec::Vectorized<float> cvt_lowp_fp_to_fp32(
|
| 348 |
+
at::vec::Vectorized<scalar_t> src) {
|
| 349 |
+
at::vec::Vectorized<float> res_vec1(0);
|
| 350 |
+
at::vec::Vectorized<float> res_vec2(0);
|
| 351 |
+
std::tie(res_vec1, res_vec2) = at::vec::convert_to_float<scalar_t>(src);
|
| 352 |
+
return res_vec1;
|
| 353 |
+
}
|
| 354 |
+
|
| 355 |
+
template <typename scalar_t>
|
| 356 |
+
inline at::vec::Vectorized<scalar_t> cvt_fp32_to_lowp_fp(
|
| 357 |
+
at::vec::Vectorized<float> src) {
|
| 358 |
+
return at::vec::convert_from_float<scalar_t>(src, src);
|
| 359 |
+
}
|
| 360 |
+
|
| 361 |
+
inline at::vec::Vectorized<float> mask_convert_to_float(at::vec::Vectorized<float> src) {
|
| 362 |
+
auto zeros = at::vec::Vectorized<float>(0);
|
| 363 |
+
auto ones = at::vec::Vectorized<float>(1);
|
| 364 |
+
return at::vec::Vectorized<float>::blendv(zeros, ones, src);
|
| 365 |
+
}
|
| 366 |
+
|
| 367 |
+
template <typename SRC>
|
| 368 |
+
inline at::vec::Vectorized<float> vec_convert_to_mask(at::vec::Vectorized<SRC> src) {
|
| 369 |
+
assert(
|
| 370 |
+
at::vec::Vectorized<float>::size() == at::vec::Vectorized<SRC>::size());
|
| 371 |
+
at::vec::Vectorized<float> res_vec(0);
|
| 372 |
+
__at_align__ float dst_tmp[at::vec::Vectorized<float>::size()];
|
| 373 |
+
__at_align__ SRC src_tmp[at::vec::Vectorized<SRC>::size()];
|
| 374 |
+
src.store(src_tmp);
|
| 375 |
+
|
| 376 |
+
#pragma unroll
|
| 377 |
+
for (int i = 0; i < at::vec::Vectorized<float>::size(); i++) {
|
| 378 |
+
*(uint32_t*)(dst_tmp + i) = src_tmp[i] ? 0xFFFFFFFF : 0;
|
| 379 |
+
}
|
| 380 |
+
|
| 381 |
+
return res_vec.loadu(dst_tmp);
|
| 382 |
+
}
|
| 383 |
+
|
| 384 |
+
template <typename SRC>
|
| 385 |
+
inline at::vec::Vectorized<float> to_float_mask(at::vec::Vectorized<SRC> src) {
|
| 386 |
+
return vec_convert_to_mask(src);
|
| 387 |
+
}
|
| 388 |
+
|
| 389 |
+
#if defined(CPU_CAPABILITY_AVX512) || defined(CPU_CAPABILITY_AVX2)
|
| 390 |
+
template <>
|
| 391 |
+
inline at::vec::Vectorized<float> to_float_mask(at::vec::Vectorized<int> src) {
|
| 392 |
+
#if defined(CPU_CAPABILITY_AVX2)
|
| 393 |
+
return at::vec::Vectorized<float>(_mm256_castsi256_ps(src));
|
| 394 |
+
#else
|
| 395 |
+
return at::vec::Vectorized<float>(_mm512_castsi512_ps(src));
|
| 396 |
+
#endif
|
| 397 |
+
}
|
| 398 |
+
#endif
|
| 399 |
+
|
| 400 |
+
template <>
|
| 401 |
+
inline at::vec::Vectorized<float> to_float_mask(at::vec::Vectorized<float> src) {
|
| 402 |
+
return src;
|
| 403 |
+
}
|
| 404 |
+
|
| 405 |
+
inline at::vec::Vectorized<float> to_float_mask(int src) {
|
| 406 |
+
float mask;
|
| 407 |
+
*(uint32_t*)&mask = src ? 0xFFFFFFFF : 0;
|
| 408 |
+
return at::vec::Vectorized<float>(mask);
|
| 409 |
+
}
|
| 410 |
+
#endif
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__init__.py
ADDED
|
File without changes
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/cuda_kernel.cpython-310.pyc
ADDED
|
Binary file (10.9 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cuda_cpp_scheduling.py
ADDED
|
@@ -0,0 +1,212 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
from typing import cast, List
|
| 3 |
+
|
| 4 |
+
from ...._dynamo.utils import counters
|
| 5 |
+
|
| 6 |
+
from ... import config, ir
|
| 7 |
+
from ...codecache import code_hash, get_path
|
| 8 |
+
from ...ir import ComputedBuffer, CUDATemplateBuffer, Pointwise
|
| 9 |
+
from ...scheduler import (
|
| 10 |
+
BaseSchedulerNode,
|
| 11 |
+
BaseScheduling,
|
| 12 |
+
FusedSchedulerNode,
|
| 13 |
+
Scheduler,
|
| 14 |
+
SchedulerNode,
|
| 15 |
+
)
|
| 16 |
+
from ...utils import get_fused_kernel_name, get_kernel_metadata, sympy_product
|
| 17 |
+
from ...virtualized import V
|
| 18 |
+
from ..common import IndentedBuffer
|
| 19 |
+
|
| 20 |
+
from .cutlass_epilogue_gen import CUTLASSEVTOpNotImplementedError
|
| 21 |
+
|
| 22 |
+
log = logging.getLogger(__name__)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class CUDACPPScheduling(BaseScheduling):
|
| 26 |
+
"""
|
| 27 |
+
Partial Scheduling implementation for CUDA C++ Kernels.
|
| 28 |
+
This class is intended to be used in combination with TritonScheduling,
|
| 29 |
+
and delegated to by CUDACombinedScheduling.
|
| 30 |
+
|
| 31 |
+
It handles fusion decisions and CUDA C++ specific template code generation.
|
| 32 |
+
"""
|
| 33 |
+
|
| 34 |
+
def __init__(self, scheduler: Scheduler):
|
| 35 |
+
super().__init__()
|
| 36 |
+
self.scheduler = scheduler
|
| 37 |
+
|
| 38 |
+
def group_fn(self, sizes):
|
| 39 |
+
return tuple(V.graph.sizevars.simplify(sympy_product(s)) for s in sizes)
|
| 40 |
+
|
| 41 |
+
def is_cuda_cpp_template(self, node: BaseSchedulerNode) -> bool:
|
| 42 |
+
return isinstance(node, SchedulerNode) and isinstance(
|
| 43 |
+
node.node, CUDATemplateBuffer
|
| 44 |
+
)
|
| 45 |
+
|
| 46 |
+
def is_cuda_cpp_fused_template(self, node: BaseSchedulerNode) -> bool:
|
| 47 |
+
return isinstance(node, FusedSchedulerNode) and self.is_cuda_cpp_template(
|
| 48 |
+
node.get_template_node()
|
| 49 |
+
)
|
| 50 |
+
|
| 51 |
+
def _can_fuse_epilogue_impl(
|
| 52 |
+
self,
|
| 53 |
+
cuda_template_buffer: CUDATemplateBuffer,
|
| 54 |
+
epilogue_nodes: List[ir.IRNode],
|
| 55 |
+
additional_node: ir.IRNode,
|
| 56 |
+
) -> bool:
|
| 57 |
+
"""
|
| 58 |
+
Check if the given node can be fused with the epilogue. At the moment, Kernels
|
| 59 |
+
support fusion with Pointwise operations, wrapped in (named) ComputedBuffer nodes.
|
| 60 |
+
|
| 61 |
+
Args:
|
| 62 |
+
cuda_template_buffer : A CUDATemplateBuffer object representing the CUDA template and it's result buffer
|
| 63 |
+
epilogue_nodes : List[ir.Buffer]: The list of already fused epilogue nodes.
|
| 64 |
+
additional_node: The ir.Buffer node to be checked if it can be fused with the epilogue.
|
| 65 |
+
Returns:
|
| 66 |
+
- bool: True if the given node can be fused with the epilogue, False otherwise.
|
| 67 |
+
|
| 68 |
+
"""
|
| 69 |
+
if not isinstance(cuda_template_buffer, CUDATemplateBuffer):
|
| 70 |
+
return False
|
| 71 |
+
if not cuda_template_buffer.template.can_fuse_epilogue:
|
| 72 |
+
# The used GEMM op does not support fusing epilogues
|
| 73 |
+
return False
|
| 74 |
+
if not isinstance(additional_node, ComputedBuffer):
|
| 75 |
+
return False
|
| 76 |
+
if not isinstance(additional_node.data, Pointwise):
|
| 77 |
+
return False
|
| 78 |
+
# We can fuse a Pointwise op that depends on the last fused epilogue node
|
| 79 |
+
# if any. If there is no epilogue node yet, it needs to depend on the template
|
| 80 |
+
# node
|
| 81 |
+
node_name = additional_node.get_computed_buffer_name()
|
| 82 |
+
if node_name is None:
|
| 83 |
+
return False
|
| 84 |
+
|
| 85 |
+
if len(epilogue_nodes) == 0:
|
| 86 |
+
if cuda_template_buffer.name not in additional_node.get_read_names():
|
| 87 |
+
return False
|
| 88 |
+
else:
|
| 89 |
+
last_epilogue_node = epilogue_nodes[-1]
|
| 90 |
+
assert isinstance(last_epilogue_node, ir.ComputedBuffer) # for mypy
|
| 91 |
+
last_epilogue_name = (
|
| 92 |
+
last_epilogue_node.name
|
| 93 |
+
if last_epilogue_node.name is not None
|
| 94 |
+
else last_epilogue_node.data.name # type: ignore[attr-defined]
|
| 95 |
+
)
|
| 96 |
+
if last_epilogue_name not in additional_node.get_read_names():
|
| 97 |
+
return False
|
| 98 |
+
if additional_node.layout != cuda_template_buffer.layout:
|
| 99 |
+
return False
|
| 100 |
+
try:
|
| 101 |
+
from torch._inductor.codegen.cuda.cutlass_epilogue_gen import (
|
| 102 |
+
CutlassEVTEpilogueArgumentFormatter,
|
| 103 |
+
CutlassEVTEpilogueTypeFormatter,
|
| 104 |
+
)
|
| 105 |
+
|
| 106 |
+
CutlassEVTEpilogueTypeFormatter.ir_to_evt_string(
|
| 107 |
+
cast(str, cuda_template_buffer.name), "anything", [additional_node]
|
| 108 |
+
)
|
| 109 |
+
CutlassEVTEpilogueArgumentFormatter.ir_to_evt_argument_string(
|
| 110 |
+
cast(str, cuda_template_buffer.name), [additional_node]
|
| 111 |
+
)
|
| 112 |
+
except CUTLASSEVTOpNotImplementedError as e:
|
| 113 |
+
not_implemented_op = str(e)
|
| 114 |
+
if not_implemented_op.startswith("_op_"):
|
| 115 |
+
not_implemented_op = not_implemented_op[4:]
|
| 116 |
+
log.warning(
|
| 117 |
+
f"Cannot fuse epilogue node {additional_node} into {cuda_template_buffer.name}, likely due to unsupported operation: {not_implemented_op}" # noqa: G004, B950
|
| 118 |
+
)
|
| 119 |
+
return False
|
| 120 |
+
else:
|
| 121 |
+
# Likely due to unsupported dtype.
|
| 122 |
+
log.warning(
|
| 123 |
+
f"Cannot fuse epilogue node {additional_node} into {cuda_template_buffer.name}. Reason: {not_implemented_op}" # noqa: G004, B950
|
| 124 |
+
)
|
| 125 |
+
return False
|
| 126 |
+
return True
|
| 127 |
+
|
| 128 |
+
@staticmethod
|
| 129 |
+
def _unwrap_epilogue_nodes(fused_node: FusedSchedulerNode) -> List[ir.IRNode]:
|
| 130 |
+
nodes = fused_node.get_nodes()
|
| 131 |
+
template_node = fused_node.get_template_node()
|
| 132 |
+
nodes.remove(template_node)
|
| 133 |
+
return [n.node for n in nodes]
|
| 134 |
+
|
| 135 |
+
def can_fuse_vertical(
|
| 136 |
+
self, node1: BaseSchedulerNode, node2: BaseSchedulerNode
|
| 137 |
+
) -> bool:
|
| 138 |
+
if self.is_cuda_cpp_template(node1) and isinstance(node2, SchedulerNode):
|
| 139 |
+
return self._can_fuse_epilogue_impl(
|
| 140 |
+
cast(CUDATemplateBuffer, node1.node), [], node2.node
|
| 141 |
+
)
|
| 142 |
+
elif self.is_cuda_cpp_fused_template(node1) and isinstance(
|
| 143 |
+
node2, SchedulerNode
|
| 144 |
+
):
|
| 145 |
+
fnode1 = cast(FusedSchedulerNode, node1)
|
| 146 |
+
return self._can_fuse_epilogue_impl(
|
| 147 |
+
fnode1.get_template_node().node,
|
| 148 |
+
self._unwrap_epilogue_nodes(fnode1),
|
| 149 |
+
node2.node,
|
| 150 |
+
)
|
| 151 |
+
return False
|
| 152 |
+
|
| 153 |
+
def define_kernel(self, src_code: str, node_schedule) -> str:
|
| 154 |
+
wrapper = V.graph.wrapper_code
|
| 155 |
+
if src_code in wrapper.src_to_kernel:
|
| 156 |
+
kernel_name = wrapper.src_to_kernel[src_code]
|
| 157 |
+
else:
|
| 158 |
+
fused_name = (
|
| 159 |
+
get_fused_kernel_name(node_schedule, config.triton.descriptive_names)
|
| 160 |
+
if config.triton.descriptive_names
|
| 161 |
+
else ""
|
| 162 |
+
)
|
| 163 |
+
kernel_name = "_".join(["cuda", fused_name, wrapper.next_kernel_suffix()])
|
| 164 |
+
# use the original src_code as the key
|
| 165 |
+
wrapper.src_to_kernel[src_code] = kernel_name
|
| 166 |
+
src_code = src_code.replace("KERNEL_NAME", kernel_name)
|
| 167 |
+
|
| 168 |
+
_, _, kernel_path = get_path(code_hash(src_code), "py")
|
| 169 |
+
|
| 170 |
+
compile_wrapper = IndentedBuffer()
|
| 171 |
+
compile_wrapper.writeline("async_compile.cuda(r'''")
|
| 172 |
+
compile_wrapper.splice(src_code, strip=True)
|
| 173 |
+
compile_wrapper.writeline("''', 'so')")
|
| 174 |
+
|
| 175 |
+
metadata_comment = f"# kernel path: {kernel_path}"
|
| 176 |
+
origins, detailed_origins = get_kernel_metadata(node_schedule, wrapper)
|
| 177 |
+
metadata_comment += "\n" + origins + "\n" + detailed_origins
|
| 178 |
+
wrapper.define_kernel(
|
| 179 |
+
kernel_name, compile_wrapper.getvalue(), metadata_comment
|
| 180 |
+
)
|
| 181 |
+
return kernel_name
|
| 182 |
+
|
| 183 |
+
    def codegen_template(
        self, template_node: BaseSchedulerNode, epilogue_nodes: List[SchedulerNode]
    ):
        """
        Codegen a CUDA template, possibly with fused epilogues

        Renders the CUDA template buffer wrapped by template_node (with the
        given epilogues fused in), defines the resulting kernel on the wrapper
        code, and emits the call to it.
        """
        counters["inductor"]["cuda_epilogue_fusion_counter"] += len(epilogue_nodes)
        assert self.is_cuda_cpp_template(
            template_node
        ), "Template node passed to CUDAScheduler.codegen_template must be a SchedulerNode that wraps a CUDATemplateBuffer"
        template_node = cast(SchedulerNode, template_node)
        # Template kernels are not reductions: reduction numel must be 1.
        _, (numel, rnumel) = template_node.group
        assert rnumel == 1
        ctb: CUDATemplateBuffer = cast(CUDATemplateBuffer, template_node.node)
        epilogue_ir_nodes: List[ir.Buffer] = [n.node for n in epilogue_nodes]
        assert all(
            isinstance(n, ir.ComputedBuffer) for n in epilogue_ir_nodes
        ), "Epilogue nodes must all be instances of ir.ComputedBuffer"
        kernel, render = ctb.make_kernel_render(ctb, epilogue_nodes=epilogue_ir_nodes)
        with kernel:
            # Mark every covered node as run before rendering the source.
            for node in [template_node, *epilogue_nodes]:
                node.mark_run()
            src_code = render()

        with V.set_kernel_handler(kernel):
            node_schedule = [template_node, *epilogue_nodes]
            kernel_name = self.define_kernel(src_code, node_schedule)
        kernel.call_kernel(kernel_name, ctb, epilogue_ir_nodes)
        # Propagate buffers the kernel eliminated, then let the scheduler
        # release anything that is no longer needed.
        V.graph.removed_buffers |= kernel.removed_buffers
        self.scheduler.free_buffers()
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cuda_env.py
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
import logging
|
| 3 |
+
from typing import Optional
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
|
| 7 |
+
from ... import config
|
| 8 |
+
|
| 9 |
+
log = logging.getLogger(__name__)
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def get_cuda_arch() -> Optional[str]:
    """Return the target CUDA arch as a string (e.g. "80"), or None on failure.

    Prefers the explicitly configured ``config.cuda.arch``; otherwise derives
    the arch from the compute capability of the first visible device.
    """
    try:
        arch = config.cuda.arch
        if arch is None:
            # No override configured: query device 0's compute capability.
            major, minor = torch.cuda.get_device_capability(0)
            arch = major * 10 + minor
        return str(arch)
    except Exception as exc:
        log.error("Error getting cuda arch: %s", exc)
        return None
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def get_cuda_version() -> Optional[str]:
    """Return the CUDA toolkit version string, or None on failure.

    Prefers the explicitly configured ``config.cuda.version``; otherwise
    falls back to ``torch.version.cuda``.
    """
    try:
        configured = config.cuda.version
        return configured if configured is not None else torch.version.cuda
    except Exception as exc:
        log.error("Error getting cuda version: %s", exc)
        return None
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
@functools.lru_cache(None)
def nvcc_exist(nvcc_path: Optional[str] = "nvcc") -> bool:
    """Return True if *nvcc_path* resolves to an executable on PATH.

    Args:
        nvcc_path: Name or path of the nvcc binary to look for. None is
            accepted and yields False.

    Returns:
        True if the executable can be found, False otherwise. Results are
        cached per path for the lifetime of the process.
    """
    if nvcc_path is None:
        return False
    import shutil

    # shutil.which is portable (works on Windows and in minimal containers
    # that lack a `which` binary) and avoids spawning a subprocess.
    return shutil.which(nvcc_path) is not None
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cuda_template.py
ADDED
|
@@ -0,0 +1,241 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
import itertools
|
| 3 |
+
import logging
|
| 4 |
+
from typing import List, Optional
|
| 5 |
+
from unittest.mock import patch
|
| 6 |
+
|
| 7 |
+
import sympy
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
from ...autotune_process import CUDABenchmarkRequest, TensorMeta
|
| 11 |
+
from ...ir import Buffer, CUDATemplateBuffer, IRNode, Layout
|
| 12 |
+
|
| 13 |
+
from ...utils import IndentedBuffer, unique
|
| 14 |
+
from ...virtualized import V
|
| 15 |
+
from ..common import KernelTemplate
|
| 16 |
+
from .cuda_kernel import CUDATemplateCaller, CUDATemplateKernel
|
| 17 |
+
|
| 18 |
+
log = logging.getLogger(__name__)
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class CUDATemplate(KernelTemplate):
    """Base class for CUDA C++ kernel templates (see __init__ docstring)."""

    # Monotonic counter used to make kernel hash names unique across all
    # CUDATemplate instances in this process.
    index_counter = itertools.count()

    def __init__(
        self,
        name: str,
        input_nodes: List[Buffer],
        layout: Layout,
        input_reorder: Optional[List[int]] = None,
    ):
        """

        Baseclass for CUDA C++ Templates, derived from KernelTemplate. Not to be instantiated directly.

        Args:
            name (str): The name of the CUDATemplate object.
            input_nodes (List[IRNode]): A list of input IRNodes.
            layout (Layout): The layout of the output buffer / tensor.
            input_reorder (Optional[List[int]]): An optional list that specifies the order of the input nodes.

        """
        super().__init__(name)
        self.input_nodes = input_nodes
        # Placeholder output buffer with the requested layout.
        self.output_node: Buffer = Buffer("buf_out", layout)
        self.input_reorder = input_reorder
        self.layout = layout

    def generate(  # type: ignore[override]
        self,
        **kwargs,
    ) -> CUDATemplateCaller:
        """
        Generates the CUDA template caller object for the given GEMM template and operation. This CUDATemplateCaller
        may be used to call and benchmark the generated CUDA kernel in a standalone manner to enable Autotuning.

        Args:
            kwargs: Additional keyword arguments.

        Returns:
            A CUDATemplateCaller object representing the generated CUDA template caller.
        """
        kernel_name = f"cuda_{self.name}"
        # Render once with a patched get_dtype (reporting the output node's
        # dtype) to obtain the generated source and the call argument list.
        with patch.object(
            V.graph, "get_dtype", self._fake_get_dtype(self.output_node)
        ), CUDATemplateKernel(
            kernel_name=kernel_name,
        ) as kernel:
            code = self.render(kernel=kernel, **kwargs)
            _, call_args, _ = kernel.args.python_argdefs()
            log.debug("Generated Code:\n%s", code)
            log.debug(
                "Args: cpp_argdefs: %s, python_argdefs: %s",
                kernel.args.cpp_argdefs(),
                kernel.args.python_argdefs(),
            )

        input_reorder = (
            self.input_reorder
            if self.input_reorder is not None
            else list(range(len(self.input_nodes)))
        )
        expected_args = list(
            unique(self.input_nodes[idx].get_name() for idx in input_reorder)
        )
        expected_args.extend([self.output_node.get_name()])
        # Sanity check: the kernel must take the (reordered, deduped) inputs
        # followed by the output buffer; anything after that is size vars.
        assert list(call_args)[: len(expected_args)] == expected_args, (
            call_args,
            expected_args,
        )
        extra_args = V.graph.sizevars.size_hints(
            map(sympy.expand, call_args[len(expected_args) :])
        )

        kernel_hash_name = f"cuda_{self.name}_{next(self.index_counter)}"

        # create the BenchmarkRequest
        bmreq = CUDABenchmarkRequest(
            kernel_name=kernel_name,
            input_tensor_meta=TensorMeta.from_irnodes(self.input_nodes),
            output_tensor_meta=TensorMeta.from_irnodes(self.output_node),
            extra_args=extra_args,
            source_code=code,
        )

        # Deferred renderer used at codegen time; "KERNEL_NAME" is a
        # placeholder substituted later by the scheduler's define_kernel.
        def make_kernel_render(
            template_node: CUDATemplateBuffer,
            epilogue_nodes: Optional[List[IRNode]] = None,
        ):
            kernel = CUDATemplateKernel(
                kernel_name="KERNEL_NAME",
            )
            render = functools.partial(
                self.render,
                kernel=kernel,
                template_buffer_node=template_node,
                epilogue_nodes=epilogue_nodes,
                **kwargs,  # includes "op" argument in case of CUTLASSGemmTemplate
            )
            return kernel, render

        return CUDATemplateCaller(
            kernel_hash_name,
            self.name,
            self.input_nodes,
            self.output_node.get_layout(),
            make_kernel_render,
            bmreq,
            self,
        )

    def header(self) -> IndentedBuffer:
        """Return an IndentedBuffer with the common C++ standard includes."""
        res = IndentedBuffer()
        res.splice(
            """
                #include <exception>
                #include <iostream>
                #include <memory>
                #include <random>
                #include <vector>
            """
        )
        return res

    def globals(self) -> IndentedBuffer:
        """Return an IndentedBuffer with global declarations (export macro, bfloat16 alias)."""
        res = IndentedBuffer()
        res.splice(
            """
                // We compile all models with -fvisibility=hidden. Any symbols that need to be
                // exposed in the final shared library must be declared with PT_EXPORT to make
                // them visible.
                #ifdef __GNUC__ // Applies to any compiler with GNU extensions (clang and g++)
                #define PT_EXPORT __attribute__((__visibility__("default")))
                #else
                #ifdef _WIN32
                #define PT_EXPORT __declspec(dllexport)
                #else
                #define PT_EXPORT
                #endif
                #endif
                using bfloat16 = nv_bfloat16;
            """
        )
        return res

    def render(self, **kwargs) -> str:
        # Subclasses must produce the CUDA C++ source for the kernel.
        raise NotImplementedError
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
class CUTLASSTemplate(CUDATemplate):
    """
    CUTLASSTemplate is a class that provides a template for generating CUTLASS Templates. Used as a baseclass for the
    CUTLASSGemmTemplate, providing functionality that might also be relevant for non-GEMM CUTLASS Kernels.
    """

    def header(self) -> IndentedBuffer:
        """Extend the base headers with the CUTLASS / CuTe includes."""
        res = super().header()
        res.splice(
            """
                #include "cute/tensor.hpp"
                #include "cutlass/cutlass.h"
                #include "cutlass/numeric_types.h"
                #include "cutlass/tensor_ref.h"
                #include "cutlass/util/host_tensor.h"
                #include "cutlass/util/reference/host/tensor_fill.h"
                #include "cutlass/util/reference/device/tensor_fill.h"
                #include "cutlass/util/device_memory.h"
            """
        )
        return res

    def globals(self) -> IndentedBuffer:
        """Extend base globals with a status-check macro and a pass-through EVT functor."""
        res = super().globals()
        res.splice(
            """
                using namespace cute;
                #define CUTLASS_CHECK(status) \\
                { \\
                  cutlass::Status error = status; \\
                  if (error != cutlass::Status::kSuccess) { \\
                    auto msg = std::string("[") + __FILE__ + "] Got cutlass error: " + \\
                        cutlassGetStatusString(error) + " at: " + std::to_string(__LINE__); \\
                    throw std::runtime_error(msg); \\
                  } \\
                }

                // Used as pass-through functor in EVT just for type casting / rounding
                template <typename T>
                struct identity_op {
                  CUTLASS_HOST_DEVICE
                  T operator()(T val) const { return val; }
                };

            """
        )
        return res

    def cute_int(self, int_str: str, var_name: str) -> str:
        """Render an integer size/stride as a CuTe expression.

        Compile-time constant 1 strides become cute::Int<1>{}; any other
        value is emitted verbatim, annotated with var_name as a C comment.
        """
        res = ""
        if int_str in {"1", "1L"}:
            res = "cute::Int<1>{}"
        else:
            res = int_str

        return f"{res} /* {var_name} */"

    # Mapping from torch dtypes to the corresponding CUTLASS C++ type names.
    _DTYPE_TO_CUTLASS = {
        torch.float32: "float",
        torch.float64: "double",
        torch.float16: "cutlass::half_t",
        torch.int32: "int",
        torch.int8: "int8_t",
        torch.uint8: "uint8_t",
        torch.bool: "bool",
        torch.bfloat16: "cutlass::bfloat16_t",
    }

    def cutlass_type_cast(self, node: IRNode, ptr: str) -> str:
        """Cast a raw pointer expression to node's CUTLASS element type.

        Returns ptr unchanged when node is None (e.g. an absent optional input).
        """
        if node is None:
            return ptr
        else:
            return f"({self._DTYPE_TO_CUTLASS.get(node.get_dtype())}*)({ptr})"
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_lib_extensions/__pycache__/gemm_operation_extensions.cpython-310.pyc
ADDED
|
Binary file (6.62 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_lib_extensions/gemm_operation_extensions.py
ADDED
|
@@ -0,0 +1,186 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ..cutlass_utils import try_import_cutlass
|
| 2 |
+
|
| 3 |
+
if try_import_cutlass():
|
| 4 |
+
import enum
|
| 5 |
+
|
| 6 |
+
from cutlass_library.library import * # noqa: F401, F403
|
| 7 |
+
from cutlass_library.gemm_operation import * # noqa: F401, F403
|
| 8 |
+
|
| 9 |
+
# copied / modified from original at
|
| 10 |
+
# https://github.com/NVIDIA/cutlass/blob/8783c41851cd3582490e04e69e0cd756a8c1db7f/tools/library/scripts/gemm_operation.py#L658
|
| 11 |
+
# to support EVT similar to
|
| 12 |
+
# https://github.com/NVIDIA/cutlass/blob/8783c41851cd3582490e04e69e0cd756a8c1db7f/examples/49_hopper_gemm_with_collective_builder/49_collective_builder.cu#L315C69-L315C69 # noqa: B950
|
| 13 |
+
    class EmitGemmUniversal3xInstanceWithEVT:
        """Responsible for emitting a CUTLASS 3.x template definition"""

        def __init__(self, operation_suffix=""):
            self.operation_suffix = operation_suffix
            # C++ headers the emitted instantiation depends on.
            self.includes = [
                "cutlass/cutlass.h",
                "cutlass/gemm/gemm.h",
                "cutlass/numeric_types.h",
                "cutlass/gemm/kernel/gemm_universal.hpp",
                "cutlass/gemm/collective/collective_builder.hpp",
                "cutlass/epilogue/collective/collective_builder.hpp",
            ]
            # Template used when the epilogue functor is a builtin enum value.
            self.builtin_epilogue_functor_template = """
                ${epilogue_functor}<
                  ${element_c},
                  ${epilogue_vector_length},
                  ${element_accumulator},
                  ${element_epilogue}
                >
            """
            # Main CUTLASS 3.x kernel instantiation template (epilogue
            # CollectiveBuilder + mainloop CollectiveBuilder + GemmUniversal).
            self.gemm_template = """
                using EpilogueScheduleType = ${epilogue_schedule};
                static_assert(cute::is_same_v<EpilogueScheduleType, cutlass::epilogue::TmaWarpSpecialized> ||
                    cute::is_same_v<EpilogueScheduleType, cutlass::epilogue::TmaWarpSpecializedCooperative>,
                    "Epilogue visitor trees are currently only supported by the TMA warp-specialized epilogue");
                static constexpr auto RoundStyle = cutlass::FloatRoundStyle::round_to_nearest;
                using ElementAcc = ${element_accumulator};
                using ElementD = ${element_d};
                ${epilogue_functor};
                using ${operation_name}_epilogue =
                  typename cutlass::epilogue::collective::CollectiveBuilder<
                    ${arch}, ${opcode_class},
                    cute::Shape<cute::_${tile_shape_m}, cute::_${tile_shape_n}, cute::_${tile_shape_k}>,
                    cute::Shape<cute::_${cluster_m},cute::_${cluster_n},cute::_${cluster_k}>,
                    cutlass::epilogue::collective::EpilogueTileAuto,
                    ${element_accumulator}, ${element_epilogue},
                    ${element_c}, ${layout_c}, ${align_c},
                    ${element_d}, ${layout_d}, ${align_d},
                    EpilogueScheduleType,
                    ${operation_name}_epilogue_functor
                  >::CollectiveOp;

                using ${operation_name}_mainloop =
                  typename cutlass::gemm::collective::CollectiveBuilder<
                    ${arch}, ${opcode_class},
                    ${element_a}, ${layout_a}, ${align_a},
                    ${element_b}, ${layout_b}, ${align_b},
                    ${element_accumulator},
                    cute::Shape<cute::_${tile_shape_m}, cute::_${tile_shape_n}, cute::_${tile_shape_k}>,
                    cute::Shape<cute::_${cluster_m},cute::_${cluster_n},cute::_${cluster_k}>,
                    ${stages},
                    ${kernel_schedule}
                  >::CollectiveOp;

                // Gemm operator ${operation_name}
                using ${operation_name}_base = cutlass::gemm::kernel::GemmUniversal<
                    cute::Shape<int,int,int,int>,
                    ${operation_name}_mainloop,
                    ${operation_name}_epilogue,
                    ${tile_scheduler}>;

                // Define named type
                struct ${operation_name} :
                  public ${operation_name}_base { };

            """

        #
        def instance_template(self):
            # Template used by the manifest to register the emitted kernel.
            return """
                ${compile_guard_start}
                  using GemmKernel = cutlass::gemm::device::GemmUniversalAdapter<${operation_name}>;
                  manifest.append(
                    new ${gemm_kind}<GemmKernel>("${operation_name}"));
                ${compile_guard_end}
            """

        #
        def emit(self, operation):
            """Render the CUTLASS 3.x instantiation C++ source for *operation*.

            Substitutes the operation's tile/cluster shapes, layouts, dtypes,
            schedules, and epilogue functor into self.gemm_template.
            """
            tile_shape = operation.tile_description.tile_shape
            warp_count = operation.tile_description.warp_count
            # stage count set to zero indicates builder automatic stage selection
            if operation.tile_description.stages > 0:
                stage_count_string = f"cutlass::gemm::collective::StageCount<{str(operation.tile_description.stages)}>"
            else:
                stage_count_string = f"cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename {str(operation.procedural_name())}_epilogue::SharedStorage)>"  # noqa: B950
            warp_shape = [tile_shape[idx] // warp_count[idx] for idx in range(3)]

            (
                instance_layout_A,
                instance_layout_B,
                instance_layout_C,
                instance_layout_D,
            ) = (
                operation.A.layout,
                operation.B.layout,
                operation.C.layout,
                operation.D.layout,
            )

            # 3.0 profiler integration only supports trivial epilogues for now
            epilogue_vector_length = 1

            # Support built-in epilogue functors or user-defined functions
            if isinstance(operation.epilogue_functor, enum.Enum):
                values = {
                    "epilogue_vector_length": str(epilogue_vector_length),
                    "element_epilogue": str(DataTypeTag[operation.element_epilogue]),  # type: ignore[name-defined]
                    "epilogue_functor": EpilogueFunctorTag[operation.epilogue_functor],  # type: ignore[name-defined]
                }
                epilogue_functor = SubstituteTemplate(  # type: ignore[name-defined]
                    self.builtin_epilogue_functor_template, values
                )

            elif callable(operation.epilogue_functor):
                # A callable builds the (EVT) functor declaration from a name.
                epilogue_functor = operation.epilogue_functor(
                    operation.procedural_name() + "_epilogue_functor"
                )
            else:
                epilogue_functor = str(operation.epilogue_functor)
            #

            values = {
                "operation_name": operation.procedural_name(),
                "operation_suffix": self.operation_suffix,
                "element_a": DataTypeTag[operation.A.element],  # type: ignore[name-defined]
                "layout_a": LayoutTag[instance_layout_A],  # type: ignore[name-defined]
                "element_b": DataTypeTag[operation.B.element],  # type: ignore[name-defined]
                "layout_b": LayoutTag[instance_layout_B],  # type: ignore[name-defined]
                "element_c": DataTypeTag[operation.C.element],  # type: ignore[name-defined]
                "layout_c": LayoutTag[instance_layout_C],  # type: ignore[name-defined]
                "element_d": DataTypeTag[operation.D.element],  # type: ignore[name-defined]
                "layout_d": LayoutTag[instance_layout_D],  # type: ignore[name-defined]
                "element_accumulator": DataTypeTag[operation.accumulator_type()],  # type: ignore[name-defined]
                "opcode_class": OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],  # type: ignore[name-defined] # noqa: B950
                "arch": "cutlass::arch::Sm%d" % operation.arch,
                "tile_shape_m": str(operation.tile_description.tile_shape[0]),
                "tile_shape_n": str(operation.tile_description.tile_shape[1]),
                "tile_shape_k": str(operation.tile_description.tile_shape[2]),
                "cluster_m": str(operation.tile_description.cluster_shape[0]),
                "cluster_n": str(operation.tile_description.cluster_shape[1]),
                "cluster_k": str(operation.tile_description.cluster_shape[2]),
                "warp_shape_m": str(warp_shape[0]),
                "warp_shape_n": str(warp_shape[1]),
                "warp_shape_k": str(warp_shape[2]),
                "instruction_shape_m": str(
                    operation.tile_description.math_instruction.instruction_shape[0]
                ),
                "instruction_shape_n": str(
                    operation.tile_description.math_instruction.instruction_shape[1]
                ),
                "instruction_shape_k": str(
                    operation.tile_description.math_instruction.instruction_shape[2]
                ),
                "kernel_schedule": str(KernelScheduleTag[operation.kernel_schedule]),  # type: ignore[name-defined]
                "epilogue_schedule": str(EpilogueScheduleTag[operation.epilogue_schedule]),  # type: ignore[name-defined]
                "epilogue_functor": epilogue_functor,
                "stages": stage_count_string,
                "align_a": str(operation.A.alignment),
                "align_b": str(operation.B.alignment),
                "align_c": str(operation.C.alignment),
                # NOTE(review): "align_d" reads operation.C.alignment (not D)
                # as written in the source — confirm this is intentional.
                "align_d": str(operation.C.alignment),
                "transform_a": ComplexTransformTag[operation.A.complex_transform],  # type: ignore[name-defined]
                "transform_b": ComplexTransformTag[operation.B.complex_transform],  # type: ignore[name-defined]
                "math_operation": MathOperationTag[  # type: ignore[name-defined]
                    operation.tile_description.math_instruction.math_operation
                ],
                "epilogue_vector_length": str(epilogue_vector_length),
                "element_epilogue": str(DataTypeTag[operation.element_epilogue]),  # type: ignore[name-defined]
                "tile_scheduler": str(TileSchedulerTag[operation.tile_scheduler]),  # type: ignore[name-defined]
            }

            return SubstituteTemplate(self.gemm_template, values)  # type: ignore[name-defined]
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_utils.py
ADDED
|
@@ -0,0 +1,257 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
import logging
|
| 3 |
+
import os
|
| 4 |
+
import sys
|
| 5 |
+
from dataclasses import dataclass
|
| 6 |
+
from typing import Any, List, Optional
|
| 7 |
+
|
| 8 |
+
import sympy
|
| 9 |
+
|
| 10 |
+
import torch
|
| 11 |
+
|
| 12 |
+
from ...codecache import cache_dir
|
| 13 |
+
from ...config import cuda as inductor_cuda_config
|
| 14 |
+
from ...ir import Layout
|
| 15 |
+
from .cuda_env import get_cuda_arch, get_cuda_version
|
| 16 |
+
|
| 17 |
+
log = logging.getLogger(__name__)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def _rename_cutlass_import(content: str, cutlass_modules: List[str]) -> str:
|
| 21 |
+
for cutlass_module in cutlass_modules:
|
| 22 |
+
content = content.replace(
|
| 23 |
+
f"from {cutlass_module} import ",
|
| 24 |
+
f"from cutlass_library.{cutlass_module} import ",
|
| 25 |
+
)
|
| 26 |
+
return content
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def _gen_cutlass_file(
    file_name: str, cutlass_modules: List[str], src_dir: str, dst_dir: str
) -> None:
    """Copy src_dir/file_name to dst_dir/file_name, qualifying cutlass imports.

    Reads the source file, rewrites its ``from <module> import`` lines via
    _rename_cutlass_import, and writes the result to the destination dir.
    """
    src_path = os.path.abspath(os.path.join(src_dir, file_name))
    with open(src_path) as src_file:
        contents = src_file.read()
    contents = _rename_cutlass_import(contents, cutlass_modules)
    dst_path = os.path.abspath(os.path.join(dst_dir, file_name))
    with open(dst_path, "w") as dst_file:
        dst_file.write(contents)
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
@functools.lru_cache(None)
def try_import_cutlass() -> bool:
    """Make the CUTLASS python scripts importable and report success.

    Symlinks the CUTLASS python sources into the inductor cache dir (under a
    ``cutlass_library`` package name), appends that dir to sys.path, and
    verifies the key submodules import. Cached: runs at most once per process.
    """
    # Copy CUTLASS python scripts to a temp dir and add the temp dir to Python search path.
    # This is a temporary hack to avoid CUTLASS module naming conflicts.
    # TODO(ipiszy): remove this hack when CUTLASS solves Python scripts packaging structure issues.

    cutlass_py_full_path = os.path.abspath(
        os.path.join(inductor_cuda_config.cutlass_dir, "python/cutlass_library")
    )
    tmp_cutlass_py_full_path = os.path.abspath(
        os.path.join(cache_dir(), "torch_cutlass_library")
    )
    dst_link = os.path.join(tmp_cutlass_py_full_path, "cutlass_library")

    if os.path.isdir(cutlass_py_full_path):
        if tmp_cutlass_py_full_path not in sys.path:
            if os.path.exists(dst_link):
                # A link already exists: it must be a symlink pointing at the
                # configured CUTLASS checkout, otherwise ask the user to fix it.
                assert os.path.islink(
                    dst_link
                ), f"{dst_link} is not a symlink. Try to remove {dst_link} manually and try again."
                assert os.path.realpath(os.readlink(dst_link)) == os.path.realpath(
                    cutlass_py_full_path
                ), f"Symlink at {dst_link} does not point to {cutlass_py_full_path}"
            else:
                os.makedirs(tmp_cutlass_py_full_path, exist_ok=True)
                os.symlink(cutlass_py_full_path, dst_link)
            sys.path.append(tmp_cutlass_py_full_path)
        try:
            import cutlass_library.generator  # noqa: F401
            import cutlass_library.library  # noqa: F401
            import cutlass_library.manifest  # noqa: F401

            return True

        except ImportError as e:
            log.debug(
                "Failed to import CUTLASS packages: %s, ignoring the CUTLASS backend.",
                str(e),
            )
    else:
        log.debug(
            "Failed to import CUTLASS packages: CUTLASS repo does not exist: %s",
            cutlass_py_full_path,
        )
    return False
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def _normalize_cuda_arch(arch: str) -> str:
|
| 95 |
+
if int(arch) >= 90:
|
| 96 |
+
return "90"
|
| 97 |
+
elif int(arch) >= 80:
|
| 98 |
+
return "80"
|
| 99 |
+
elif int(arch) >= 75:
|
| 100 |
+
return "75"
|
| 101 |
+
elif int(arch) >= 70:
|
| 102 |
+
return "70"
|
| 103 |
+
else:
|
| 104 |
+
raise NotImplementedError(f"Unsupported cuda arch: {arch}")
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
@dataclass
class CUTLASSArgs:
    """
    CUTLASS args used to initialize a CUTLASS Manifest.
    """

    # Required: target compute capability (e.g. "80"); normalized in __post_init__.
    architectures: Optional[str] = None
    # Required: CUDA toolkit version string.
    cuda_version: Optional[str] = None

    # The remaining attributes mirror the CUTLASS generator's CLI options.
    # They are deliberately unannotated, so they are plain class attributes,
    # NOT dataclass fields.
    operations = "all"
    build_dir = ""
    curr_build_dir = ""
    generator_target = ""
    kernels = "all"
    ignore_kernels = ""
    kernel_filter_file = None
    selected_kernel_list = None
    interface_dir = None
    filter_by_cc = True
    disable_full_archs_compilation = False

    def __post_init__(self):
        # Both required fields must be supplied explicitly.
        if self.architectures is None or self.cuda_version is None:
            raise RuntimeError(
                f"{self.architectures=} or {self.cuda_version=} is None!"
            )
        self.architectures = _normalize_cuda_arch(self.architectures)
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
@functools.lru_cache(None)
def _gen_ops_cached(arch, version) -> List[Any]:
    """Generate (and cache) the CUTLASS operations for an arch/version pair.

    Returns the manifest's operations; an empty list when arch or version
    cannot be determined.
    """
    # Note: Cache needs to be specific for cuda architecture and version

    # Import cutlass python scripts.
    assert try_import_cutlass()
    import cutlass_library.generator as cutlass_generator
    import cutlass_library.manifest as cutlass_manifest

    if arch is None or version is None:
        log.error(
            "Cannot detect cuda arch %s or cuda version %s. "
            "Will discard all cutlass ops. "
            "Please consider setting _inductor.cuda.arch and _inductor.cuda.version configs.",
            arch,
            version,
        )
        return list()
    arch = _normalize_cuda_arch(arch)
    args = CUTLASSArgs(architectures=arch, cuda_version=version)
    manifest = cutlass_manifest.Manifest(args)

    if arch == "90":
        # For SM90, also generate the SM80 kernels as candidates.
        cutlass_generator.GenerateSM90(manifest, args.cuda_version)
        cutlass_generator.GenerateSM80(manifest, args.cuda_version)
    else:
        try:
            # Dispatch to the generator matching the normalized arch.
            func = getattr(cutlass_generator, "GenerateSM" + arch)
            func(manifest, args.cuda_version)
        except AttributeError as e:
            raise NotImplementedError(
                "Arch " + arch + " is not supported by current cutlass lib."
            ) from e
    return manifest.operations
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
def gen_ops() -> List[Any]:
|
| 173 |
+
"""
|
| 174 |
+
Generates all supported CUTLASS operations.
|
| 175 |
+
"""
|
| 176 |
+
arch = get_cuda_arch()
|
| 177 |
+
version = get_cuda_version()
|
| 178 |
+
return _gen_ops_cached(arch, version)
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
def dtype_match(
|
| 182 |
+
torch_dtype: Optional[torch.dtype],
|
| 183 |
+
cutlass_dtype: "cutlass_library.library.DataType", # type: ignore[name-defined]
|
| 184 |
+
) -> bool:
|
| 185 |
+
# Import cutlass python scripts.
|
| 186 |
+
assert try_import_cutlass()
|
| 187 |
+
import cutlass_library
|
| 188 |
+
|
| 189 |
+
if torch_dtype == torch.float:
|
| 190 |
+
return (
|
| 191 |
+
cutlass_dtype == cutlass_library.library.DataType.f32
|
| 192 |
+
or cutlass_dtype == cutlass_library.library.DataType.tf32
|
| 193 |
+
)
|
| 194 |
+
elif torch_dtype == torch.half:
|
| 195 |
+
return cutlass_dtype == cutlass_library.library.DataType.f16
|
| 196 |
+
elif torch_dtype == torch.bfloat16:
|
| 197 |
+
return cutlass_dtype == cutlass_library.library.DataType.bf16
|
| 198 |
+
else:
|
| 199 |
+
return False
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
def get_accumulator_dtype(
|
| 203 |
+
input_torch_dtypes: List[torch.dtype],
|
| 204 |
+
) -> Optional[torch.dtype]:
|
| 205 |
+
"""
|
| 206 |
+
Given a list of input torch dtypes, returns the inferred accumulator torch dtype.
|
| 207 |
+
"""
|
| 208 |
+
|
| 209 |
+
if len(input_torch_dtypes) == 0:
|
| 210 |
+
return None
|
| 211 |
+
torch_dtype = input_torch_dtypes[0]
|
| 212 |
+
for dtype in input_torch_dtypes[1:]:
|
| 213 |
+
if torch_dtype != dtype:
|
| 214 |
+
raise RuntimeError(f"Unmatched input dtypes: {torch_dtype=}, {dtype=}")
|
| 215 |
+
if torch_dtype == torch.half:
|
| 216 |
+
if torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction:
|
| 217 |
+
return torch_dtype
|
| 218 |
+
else:
|
| 219 |
+
return torch.float
|
| 220 |
+
if torch_dtype in {torch.bfloat16, torch.float}:
|
| 221 |
+
return torch.float
|
| 222 |
+
raise NotImplementedError(f"Unsupported data type: {input_torch_dtypes=}")
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
def get_alignments(torch_dtype: torch.dtype) -> List[int]:
|
| 226 |
+
"""
|
| 227 |
+
Returns all possible valid CUTLASS alignments in terms of the number of elements for a given dtype.
|
| 228 |
+
CUTLASS gemm / conv SM80 APIs support 16 bytes max alignment, and 2 bytes min alignment.
|
| 229 |
+
"""
|
| 230 |
+
|
| 231 |
+
if torch_dtype in (torch.half, torch.bfloat16):
|
| 232 |
+
return [8, 4, 2, 1]
|
| 233 |
+
elif torch_dtype == torch.float:
|
| 234 |
+
return [4, 2, 1]
|
| 235 |
+
else:
|
| 236 |
+
raise NotImplementedError(f"unsupported {torch_dtype=} for alignments")
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
def get_max_alignment(inductor_layout: Layout) -> int:
|
| 240 |
+
"""
|
| 241 |
+
Returns the max alignment (in terms of number of elements) for a given Inductor Layout.
|
| 242 |
+
"""
|
| 243 |
+
|
| 244 |
+
dtype = inductor_layout.dtype
|
| 245 |
+
size = inductor_layout.size
|
| 246 |
+
offset = inductor_layout.offset
|
| 247 |
+
|
| 248 |
+
def is_static_int(number):
|
| 249 |
+
return isinstance(number, (int, sympy.Integer))
|
| 250 |
+
|
| 251 |
+
if is_static_int(size[-1]) and is_static_int(offset):
|
| 252 |
+
alignments = get_alignments(dtype)
|
| 253 |
+
for alignment in alignments:
|
| 254 |
+
if int(size[-1]) % alignment == 0 and int(offset) % alignment == 0:
|
| 255 |
+
return alignment
|
| 256 |
+
|
| 257 |
+
return 1
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/gemm_template.py
ADDED
|
@@ -0,0 +1,706 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import copy
|
| 2 |
+
import logging
|
| 3 |
+
import re
|
| 4 |
+
from typing import cast, Dict, List, Optional, Tuple
|
| 5 |
+
|
| 6 |
+
from ...config import cuda as inductor_cuda_config
|
| 7 |
+
from ...ir import Buffer, CUDATemplateBuffer, FixedLayout, IRNode, Layout
|
| 8 |
+
from ..common import IndentedBuffer
|
| 9 |
+
|
| 10 |
+
from . import cutlass_utils
|
| 11 |
+
from .cuda_kernel import CUDATemplateKernel
|
| 12 |
+
from .cuda_template import CUTLASSTemplate
|
| 13 |
+
from .cutlass_epilogue_gen import (
|
| 14 |
+
CutlassEVTEpilogueArgumentFormatter,
|
| 15 |
+
CutlassEVTEpilogueTypeFormatter,
|
| 16 |
+
)
|
| 17 |
+
|
| 18 |
+
log = logging.getLogger(__name__)
|
| 19 |
+
|
| 20 |
+
GEMM_TEMPLATE = r"""
|
| 21 |
+
{{template.header().getvalue()}}
|
| 22 |
+
{{template.globals().getvalue()}}
|
| 23 |
+
{{instance_definition}}
|
| 24 |
+
// When workspace_size is not a nullptr, populates requested workspace_size and returns.
|
| 25 |
+
// Otherwise, computes the Gemm kernel using the given workspace ptr.
|
| 26 |
+
extern "C" {
|
| 27 |
+
{{kernel.def_kernel(inputs=[X, W, Bias], outputs=[Y], names_str="X, W, Bias, Y", input_reorder=input_reorder)}} {
|
| 28 |
+
try {
|
| 29 |
+
{{kernel.check_not_null(X)}}
|
| 30 |
+
{{kernel.check_not_null(W)}}
|
| 31 |
+
{{kernel.check_not_null(Bias)}}
|
| 32 |
+
{{kernel.check_not_null(Y)}}
|
| 33 |
+
int64_t B = {{kernel.size(Y, 0, -3, default_value=1)}};
|
| 34 |
+
int64_t M = {{kernel.size(X, -2)}};
|
| 35 |
+
int64_t K = {{kernel.size(X, -1)}};
|
| 36 |
+
int64_t N = {{kernel.size(W, -1)}};
|
| 37 |
+
using ElementComputeEpilogue = {{instance_type}}::ElementAccumulator;
|
| 38 |
+
using coord_t = cutlass::gemm::GemmCoord::Index;
|
| 39 |
+
{{instance_type}}::Arguments arguments;
|
| 40 |
+
{{template.render_gemm_arguments(argument_template, epilogue_template, should_swap_xw,
|
| 41 |
+
X, W, Bias, Y, alpha, beta, kernel, epilogue_args)}}
|
| 42 |
+
{{instance_type}} gemm_op;
|
| 43 |
+
if (workspace_size) {
|
| 44 |
+
*workspace_size = gemm_op.get_workspace_size(arguments);
|
| 45 |
+
return 0;
|
| 46 |
+
}
|
| 47 |
+
{
|
| 48 |
+
auto status = gemm_op.can_implement(arguments);
|
| 49 |
+
CUTLASS_CHECK(status);
|
| 50 |
+
}
|
| 51 |
+
{
|
| 52 |
+
auto status = gemm_op.initialize(arguments, workspace, stream);
|
| 53 |
+
CUTLASS_CHECK(status);
|
| 54 |
+
}
|
| 55 |
+
{
|
| 56 |
+
auto status = gemm_op(stream);
|
| 57 |
+
CUTLASS_CHECK(status);
|
| 58 |
+
}
|
| 59 |
+
}
|
| 60 |
+
catch (std::exception& e) {
|
| 61 |
+
std::cerr << "Runtime error: " << e.what() << std::endl;
|
| 62 |
+
return -1;
|
| 63 |
+
}
|
| 64 |
+
catch (...) {
|
| 65 |
+
return -1;
|
| 66 |
+
}
|
| 67 |
+
return 0;
|
| 68 |
+
}
|
| 69 |
+
}
|
| 70 |
+
"""
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
GEMM_ARGS_CUTLASS_2X = r"""
|
| 74 |
+
int64_t batch_stride_x = {{kernel.stride(X, -3)}};
|
| 75 |
+
int64_t row_stride_x = {{kernel.row_or_column_stride(X)}};
|
| 76 |
+
int64_t batch_stride_w = {{kernel.stride(W, -3)}};
|
| 77 |
+
int64_t row_stride_w = {{kernel.row_or_column_stride(W)}};
|
| 78 |
+
int64_t batch_stride_bias = {{kernel.stride(Bias, -3)}};
|
| 79 |
+
int64_t row_stride_bias = {{kernel.row_or_column_stride(Bias)}};
|
| 80 |
+
int64_t batch_stride_y = {{kernel.stride(Y, -3)}};
|
| 81 |
+
int64_t row_stride_y = {{kernel.row_or_column_stride(Y)}};
|
| 82 |
+
// Initialize GemmUniversalInstance arguments.
|
| 83 |
+
arguments = {
|
| 84 |
+
{{template.gemm_mode()}}, // GemmUniversalMode mode
|
| 85 |
+
{
|
| 86 |
+
static_cast<coord_t>(M),
|
| 87 |
+
static_cast<coord_t>(N),
|
| 88 |
+
static_cast<coord_t>(K)
|
| 89 |
+
}, // GemmCoord problem_size
|
| 90 |
+
{{split_k if split_k > 1 else 'B'}}, // int batch_count
|
| 91 |
+
{ElementComputeEpilogue({{alpha}}), ElementComputeEpilogue({{beta}})}, // typename EpilogueOutputOp::Params epilogue
|
| 92 |
+
{{template.cutlass_type_cast(X, kernel.ptr(X))}}, // void const * ptr_A
|
| 93 |
+
{{template.cutlass_type_cast(W, kernel.ptr(W))}}, // void const * ptr_B
|
| 94 |
+
{{template.cutlass_type_cast(Bias, kernel.ptr(Bias))}}, // void const * ptr_C
|
| 95 |
+
{{template.cutlass_type_cast(Y, kernel.ptr(Y))}}, // void * ptr_D
|
| 96 |
+
batch_stride_x, // int64_t batch_stride_A
|
| 97 |
+
batch_stride_w, // int64_t batch_stride_B
|
| 98 |
+
batch_stride_bias, // int64_t batch_stride_C
|
| 99 |
+
batch_stride_y, // int64_t batch_stride_D
|
| 100 |
+
row_stride_x, // typename LayoutA::Stride::LongIndex lda
|
| 101 |
+
row_stride_w, // typename LayoutB::Stride::LongIndex ldb
|
| 102 |
+
row_stride_bias, // typename LayoutC::Stride::LongIndex ldc
|
| 103 |
+
row_stride_y, // typename LayoutC::Stride::LongIndex ldd
|
| 104 |
+
};
|
| 105 |
+
"""
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
GEMM_ARGS_CUTLASS_3X = r"""
|
| 109 |
+
// Initialize GemmUniversal3xInstance arguments.
|
| 110 |
+
arguments = {
|
| 111 |
+
{{template.gemm_mode()}}, // GemmUniversalMode mode
|
| 112 |
+
{
|
| 113 |
+
static_cast<coord_t>({{M}}),
|
| 114 |
+
static_cast<coord_t>({{N}}),
|
| 115 |
+
static_cast<coord_t>(K),
|
| 116 |
+
static_cast<coord_t>(B)
|
| 117 |
+
}, // ProblemShape problem_shape
|
| 118 |
+
{
|
| 119 |
+
{{template.cutlass_type_cast(X, kernel.ptr(X))}}, // ElementA const* ptr_A
|
| 120 |
+
{
|
| 121 |
+
{{template.cute_int(kernel.stride(X, -2), "stride_x0")}},
|
| 122 |
+
{{template.cute_int(kernel.stride(X, -1), "stride_x1")}},
|
| 123 |
+
{{template.cute_int(kernel.stride(X, -3), "batch_stride_x")}}
|
| 124 |
+
}, // StrideA dA
|
| 125 |
+
{{template.cutlass_type_cast(W, kernel.ptr(W))}}, // ElementB const* ptr_B
|
| 126 |
+
{
|
| 127 |
+
{{template.cute_int(kernel.stride(W, -1), "stride_w1")}},
|
| 128 |
+
{{template.cute_int(kernel.stride(W, -2), "stride_w0")}},
|
| 129 |
+
{{template.cute_int(kernel.stride(W, -3), "batch_stride_w")}}
|
| 130 |
+
}, // StrideB dB
|
| 131 |
+
}, // MainloopArguments mainloop
|
| 132 |
+
{{epilogue_arguments}}
|
| 133 |
+
};
|
| 134 |
+
"""
|
| 135 |
+
|
| 136 |
+
GEMM_ARGS_CUTLASS_3X_EPILOGUE = r"""
|
| 137 |
+
// see https://tinyurl.com/4rk89z48
|
| 138 |
+
{
|
| 139 |
+
{{epilogue_args}}, // thread, typename FusionCallbacks::Arguments ( EVT ) or ThreadEpilogueOp::Params (non-EVT )
|
| 140 |
+
{{template.cutlass_type_cast(Bias, kernel.ptr(Bias))}}, // ElementC const* ptr_C
|
| 141 |
+
{
|
| 142 |
+
{{template.cute_int(kernel.stride(Bias, -2, 1), "stride_bias0")}},
|
| 143 |
+
{{template.cute_int(kernel.stride(Bias, -1, 1), "stride_bias1")}},
|
| 144 |
+
{{template.cute_int(kernel.stride(Bias, -3), "batch_stride_bias")}}
|
| 145 |
+
}, // StrideC dC
|
| 146 |
+
{{template.cutlass_type_cast(Y, kernel.ptr(Y))}}, // ElementD const* ptr_D
|
| 147 |
+
{
|
| 148 |
+
{{template.cute_int(kernel.stride(Y, -2), "stride_y0")}},
|
| 149 |
+
{{template.cute_int(kernel.stride(Y, -1), "stride_y1")}},
|
| 150 |
+
{{template.cute_int(kernel.stride(Y, -3), "batch_stride_y")}}
|
| 151 |
+
}, // StrideD dD
|
| 152 |
+
}, // EpilogueArguments epilogue
|
| 153 |
+
"""
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
class CUTLASSGemmTemplate(CUTLASSTemplate):
|
| 157 |
+
"""
|
| 158 |
+
CUTLASS GEMM template, which is used to generate CUTLASS GEMM kernels
|
| 159 |
+
including those which allow flexible fusions with epilogues.
|
| 160 |
+
"""
|
| 161 |
+
|
| 162 |
+
def __init__(
|
| 163 |
+
self,
|
| 164 |
+
input_nodes: List[Buffer],
|
| 165 |
+
layout: Layout,
|
| 166 |
+
alpha: float,
|
| 167 |
+
beta: float,
|
| 168 |
+
input_reorder: Optional[List[int]] = None,
|
| 169 |
+
can_fuse_epilogue: Optional[bool] = None,
|
| 170 |
+
):
|
| 171 |
+
"""
|
| 172 |
+
Args:
|
| 173 |
+
input_nodes: input nodes of the kernel
|
| 174 |
+
layout: layout of the output node
|
| 175 |
+
alpha: alpha value of the GEMM operation
|
| 176 |
+
beta: beta value of the GEMM operation
|
| 177 |
+
input_reorder: reorder of the input nodes
|
| 178 |
+
can_fuse_epilogue: If set to True, will only list and use operators capable of flexible epilogue fusions.
|
| 179 |
+
If False, it will not use those. If None, both may be listed, but it will not allow fusions.
|
| 180 |
+
Defaults to None
|
| 181 |
+
"""
|
| 182 |
+
super().__init__("cutlass_gemm", input_nodes, layout, input_reorder)
|
| 183 |
+
self.alpha = alpha
|
| 184 |
+
self.beta = beta
|
| 185 |
+
self.can_fuse_epilogue = can_fuse_epilogue
|
| 186 |
+
|
| 187 |
+
@staticmethod
|
| 188 |
+
def add_cutlass_gemm_choices(
|
| 189 |
+
choices,
|
| 190 |
+
layout,
|
| 191 |
+
input_nodes,
|
| 192 |
+
alpha=1,
|
| 193 |
+
beta=0,
|
| 194 |
+
input_reorder=None,
|
| 195 |
+
fuseable=True,
|
| 196 |
+
non_fuseable=True,
|
| 197 |
+
):
|
| 198 |
+
if non_fuseable:
|
| 199 |
+
if fuseable:
|
| 200 |
+
# list both fuseable and non-fuseable ops, and treat them all as non-fuseable
|
| 201 |
+
can_fuse_epilogue = False
|
| 202 |
+
else:
|
| 203 |
+
can_fuse_epilogue = None
|
| 204 |
+
|
| 205 |
+
cutlass_template = CUTLASSGemmTemplate(
|
| 206 |
+
input_nodes,
|
| 207 |
+
layout,
|
| 208 |
+
alpha=alpha,
|
| 209 |
+
beta=beta,
|
| 210 |
+
input_reorder=input_reorder,
|
| 211 |
+
can_fuse_epilogue=can_fuse_epilogue,
|
| 212 |
+
)
|
| 213 |
+
ops = cutlass_template.gen_ops()
|
| 214 |
+
for op in ops:
|
| 215 |
+
cutlass_template.maybe_append_choice(
|
| 216 |
+
choices,
|
| 217 |
+
op=op,
|
| 218 |
+
)
|
| 219 |
+
else:
|
| 220 |
+
ops = []
|
| 221 |
+
if fuseable:
|
| 222 |
+
cutlass_template_evt = CUTLASSGemmTemplate(
|
| 223 |
+
input_nodes,
|
| 224 |
+
layout,
|
| 225 |
+
alpha=alpha,
|
| 226 |
+
beta=beta,
|
| 227 |
+
input_reorder=input_reorder,
|
| 228 |
+
can_fuse_epilogue=True,
|
| 229 |
+
)
|
| 230 |
+
# This will list only ops capable of EVT fusion
|
| 231 |
+
ops_evt = cutlass_template_evt.gen_ops()
|
| 232 |
+
for op in ops_evt:
|
| 233 |
+
cutlass_template_evt.maybe_append_choice(
|
| 234 |
+
choices,
|
| 235 |
+
op=op,
|
| 236 |
+
)
|
| 237 |
+
else:
|
| 238 |
+
ops_evt = []
|
| 239 |
+
log.debug(
|
| 240 |
+
"Added %d cutlass gemm configs and %d fuseable gemm configs.",
|
| 241 |
+
len(ops),
|
| 242 |
+
len(ops_evt),
|
| 243 |
+
)
|
| 244 |
+
|
| 245 |
+
def header(self) -> IndentedBuffer:
|
| 246 |
+
res = super().header()
|
| 247 |
+
res.splice(
|
| 248 |
+
"""
|
| 249 |
+
#include "cutlass/gemm/gemm.h"
|
| 250 |
+
#include "cutlass/gemm/device/gemm_universal.h"
|
| 251 |
+
#include "cutlass/gemm/device/gemm_universal_adapter.h"
|
| 252 |
+
#include "cutlass/gemm/kernel/gemm_universal.hpp"
|
| 253 |
+
#include "cutlass/gemm/collective/collective_builder.hpp"
|
| 254 |
+
#include "cutlass/epilogue/collective/collective_builder.hpp"
|
| 255 |
+
#include "cutlass/epilogue/collective/default_epilogue.hpp"
|
| 256 |
+
#include "cutlass/epilogue/thread/linear_combination.h"
|
| 257 |
+
#include "cutlass/gemm/dispatch_policy.hpp"
|
| 258 |
+
#include "cutlass/gemm/kernel/tile_scheduler.hpp"
|
| 259 |
+
#include "cutlass/util/distribution.h"
|
| 260 |
+
#include "cutlass/util/packed_stride.hpp"
|
| 261 |
+
#include "cutlass/util/tensor_view_io.h"
|
| 262 |
+
"""
|
| 263 |
+
)
|
| 264 |
+
return res
|
| 265 |
+
|
| 266 |
+
@staticmethod
|
| 267 |
+
def cutlass_layout(torch_layout) -> "Optional[cutlass_lib.LayoutType]": # type: ignore[name-defined]
|
| 268 |
+
assert cutlass_utils.try_import_cutlass()
|
| 269 |
+
import cutlass_library.library as cutlass_lib
|
| 270 |
+
|
| 271 |
+
if torch_layout.stride[-1] == 1:
|
| 272 |
+
return cutlass_lib.LayoutType.RowMajor
|
| 273 |
+
elif torch_layout.stride[-2] == 1:
|
| 274 |
+
return cutlass_lib.LayoutType.ColumnMajor
|
| 275 |
+
else:
|
| 276 |
+
return None
|
| 277 |
+
|
| 278 |
+
@staticmethod
|
| 279 |
+
def flip_cutlass_layout(
|
| 280 |
+
cutlass_layout: "cutlass_lib.LayoutType", # type: ignore[name-defined]
|
| 281 |
+
) -> "cutlass_lib.LayoutType": # type: ignore[name-defined]
|
| 282 |
+
assert cutlass_utils.try_import_cutlass()
|
| 283 |
+
import cutlass_library.library as cutlass_lib
|
| 284 |
+
|
| 285 |
+
if cutlass_layout == cutlass_lib.LayoutType.RowMajor:
|
| 286 |
+
return cutlass_lib.LayoutType.ColumnMajor
|
| 287 |
+
else:
|
| 288 |
+
return cutlass_lib.LayoutType.RowMajor
|
| 289 |
+
|
| 290 |
+
@staticmethod
|
| 291 |
+
def layout_match(torch_layout, cutlass_layout) -> bool:
|
| 292 |
+
return CUTLASSGemmTemplate.cutlass_layout(torch_layout) == cutlass_layout
|
| 293 |
+
|
| 294 |
+
@staticmethod
|
| 295 |
+
def set_alignment(torch_layout, op_element) -> bool:
|
| 296 |
+
alignment = cutlass_utils.get_max_alignment(torch_layout)
|
| 297 |
+
if alignment < op_element.alignment:
|
| 298 |
+
return False
|
| 299 |
+
else:
|
| 300 |
+
op_element.alignment = alignment
|
| 301 |
+
return True
|
| 302 |
+
|
| 303 |
+
@staticmethod
|
| 304 |
+
def has_tma_epilogue(op) -> bool:
|
| 305 |
+
assert cutlass_utils.try_import_cutlass()
|
| 306 |
+
import cutlass_library.library as cutlass_lib
|
| 307 |
+
|
| 308 |
+
result = False
|
| 309 |
+
if op.gemm_kind == cutlass_lib.GemmKind.Universal3x:
|
| 310 |
+
epilogue_schedule_str = str(op.epilogue_schedule).split(".")[-1]
|
| 311 |
+
result = epilogue_schedule_str.lower().startswith("tma")
|
| 312 |
+
return result
|
| 313 |
+
|
| 314 |
+
@staticmethod
|
| 315 |
+
def supports_evt(op: "cutlass_library.gemm_op.GemmOperation") -> bool: # type: ignore[name-defined]
|
| 316 |
+
"""
|
| 317 |
+
returns True if the op is capable of flexible epilogue fusions
|
| 318 |
+
using epilogue visitor trees.
|
| 319 |
+
|
| 320 |
+
See https://github.com/NVIDIA/cutlass/blob/e01b9b5029b7caca5a43c29f7d2714d7cf1dcae8/examples/49_hopper_gemm_with_collective_builder/49_collective_builder.cu#L283-L285 # noqa: B950
|
| 321 |
+
"""
|
| 322 |
+
assert cutlass_utils.try_import_cutlass()
|
| 323 |
+
import cutlass_library.library as cutlass_lib
|
| 324 |
+
|
| 325 |
+
if op.gemm_kind != cutlass_lib.GemmKind.Universal3x:
|
| 326 |
+
return False
|
| 327 |
+
if op.epilogue_schedule not in (
|
| 328 |
+
cutlass_lib.EpilogueScheduleType.TmaWarpSpecialized,
|
| 329 |
+
cutlass_lib.EpilogueScheduleType.TmaWarpSpecializedCooperative,
|
| 330 |
+
):
|
| 331 |
+
return False
|
| 332 |
+
|
| 333 |
+
return True
|
| 334 |
+
|
| 335 |
+
def render_evt_epilogue_declaration(
|
| 336 |
+
self,
|
| 337 |
+
template_output_node_name: str,
|
| 338 |
+
evt_type_name: str,
|
| 339 |
+
epilogue_nodes: List[IRNode],
|
| 340 |
+
) -> str:
|
| 341 |
+
"""Generates the epilogue for the EVT epilogue fusion"""
|
| 342 |
+
return CutlassEVTEpilogueTypeFormatter.ir_to_evt_string(
|
| 343 |
+
template_output_node_name, evt_type_name, epilogue_nodes
|
| 344 |
+
)
|
| 345 |
+
|
| 346 |
+
def define_gemm_instance(
|
| 347 |
+
self,
|
| 348 |
+
op: "cutlass_library.gemm_op.GemmOperation", # type: ignore[name-defined]
|
| 349 |
+
output_buffer_name: str,
|
| 350 |
+
epilogue_nodes: Optional[List[IRNode]] = None,
|
| 351 |
+
) -> Tuple[str, str]:
|
| 352 |
+
assert cutlass_utils.try_import_cutlass()
|
| 353 |
+
import cutlass_library.gemm_operation as cutlass_gemm_op
|
| 354 |
+
import cutlass_library.library as cutlass_lib
|
| 355 |
+
|
| 356 |
+
from torch._inductor.codegen.cuda.cutlass_lib_extensions.gemm_operation_extensions import (
|
| 357 |
+
EmitGemmUniversal3xInstanceWithEVT,
|
| 358 |
+
)
|
| 359 |
+
|
| 360 |
+
if op.gemm_kind == cutlass_lib.GemmKind.Universal3x:
|
| 361 |
+
if epilogue_nodes is not None and len(epilogue_nodes) > 0:
|
| 362 |
+
emitter = EmitGemmUniversal3xInstanceWithEVT()
|
| 363 |
+
op.epilogue_functor = lambda epilogue_functor_type_name: self.render_evt_epilogue_declaration(
|
| 364 |
+
output_buffer_name, epilogue_functor_type_name, epilogue_nodes
|
| 365 |
+
)
|
| 366 |
+
else:
|
| 367 |
+
emitter = cutlass_gemm_op.EmitGemmUniversal3xInstance()
|
| 368 |
+
op_def = emitter.emit(op)
|
| 369 |
+
pattern = re.compile(r"\s*struct\s(.*?)\s:")
|
| 370 |
+
decl = [line for line in op_def.split("\n") if "struct " in line][-1]
|
| 371 |
+
else:
|
| 372 |
+
if epilogue_nodes is not None and len(epilogue_nodes) > 0:
|
| 373 |
+
raise RuntimeError(
|
| 374 |
+
"EVT epilogue fusion is not supported for Cutlass 2.x ops."
|
| 375 |
+
)
|
| 376 |
+
emitter = cutlass_gemm_op.EmitGemmInstance()
|
| 377 |
+
op_def = emitter.emit(op)
|
| 378 |
+
op_def = op_def.replace(
|
| 379 |
+
"cutlass::gemm::device::Gemm", "cutlass::gemm::device::GemmUniversal"
|
| 380 |
+
)
|
| 381 |
+
op_def = op_def.replace("false,", "")
|
| 382 |
+
pattern = re.compile(r"\s*using\s(.*?)\s=")
|
| 383 |
+
decl = op_def.split("\n")[2]
|
| 384 |
+
match = pattern.match(decl)
|
| 385 |
+
if match is None:
|
| 386 |
+
raise RuntimeError("Invalid Gemm config: \n" + op_def)
|
| 387 |
+
op_type = match.groups()[0]
|
| 388 |
+
if op.gemm_kind == cutlass_lib.GemmKind.Universal3x:
|
| 389 |
+
op_def += f"\n using {op_type}_device_type = cutlass::gemm::device::GemmUniversalAdapter<{op_type}>;\n"
|
| 390 |
+
op_type = f"{op_type}_device_type"
|
| 391 |
+
return op_def, op_type
|
| 392 |
+
|
| 393 |
+
@staticmethod
|
| 394 |
+
def should_swap_XW(
|
| 395 |
+
bias: IRNode,
|
| 396 |
+
beta: float,
|
| 397 |
+
) -> bool:
|
| 398 |
+
return True
|
| 399 |
+
|
| 400 |
+
# TODO(ipiszy): Check whether it's necessary to swap X/W.
|
| 401 |
+
# strides = bias.get_stride()
|
| 402 |
+
# if strides[-1] != 1:
|
| 403 |
+
# return True
|
| 404 |
+
# for stride in strides[:-1]:
|
| 405 |
+
# if stride != 0:
|
| 406 |
+
# return True
|
| 407 |
+
# return False
|
| 408 |
+
|
| 409 |
+
@staticmethod
|
| 410 |
+
def swap_XW(
|
| 411 |
+
op: "cutlass_library.gemm_op.GemmOperation", # type: ignore[name-defined]
|
| 412 |
+
) -> "cutlass_library.gemm_op.GemmOperation": # type: ignore[name-defined]
|
| 413 |
+
# Swap X and W in GemmOperation.
|
| 414 |
+
new_op = copy.deepcopy(op)
|
| 415 |
+
new_op.A.layout = CUTLASSGemmTemplate.flip_cutlass_layout(new_op.A.layout)
|
| 416 |
+
new_op.B.layout = CUTLASSGemmTemplate.flip_cutlass_layout(new_op.B.layout)
|
| 417 |
+
new_op.A, new_op.B = new_op.B, new_op.A
|
| 418 |
+
new_op.C.layout = CUTLASSGemmTemplate.flip_cutlass_layout(new_op.C.layout)
|
| 419 |
+
new_op.D.layout = CUTLASSGemmTemplate.flip_cutlass_layout(new_op.D.layout)
|
| 420 |
+
return new_op
|
| 421 |
+
|
| 422 |
+
def filter_op(
|
| 423 |
+
self,
|
| 424 |
+
op: "cutlass_library.gemm_op.GemmOperation", # type: ignore[name-defined]
|
| 425 |
+
) -> "cutlass_library.gemm_op.GemmOperation": # type: ignore[name-defined]
|
| 426 |
+
assert cutlass_utils.try_import_cutlass()
|
| 427 |
+
import cutlass_library.library as cutlass_lib
|
| 428 |
+
|
| 429 |
+
# Skip simt kernels
|
| 430 |
+
if (
|
| 431 |
+
op.tile_description.math_instruction.opcode_class
|
| 432 |
+
== cutlass_lib.OpcodeClass.Simt
|
| 433 |
+
):
|
| 434 |
+
return None
|
| 435 |
+
|
| 436 |
+
# Only keep GemmUniversal kernels
|
| 437 |
+
if op.gemm_kind not in {
|
| 438 |
+
cutlass_lib.GemmKind.Universal,
|
| 439 |
+
cutlass_lib.GemmKind.Universal3x,
|
| 440 |
+
}:
|
| 441 |
+
return None
|
| 442 |
+
# Filter ops by dtypes.
|
| 443 |
+
X = self.input_nodes[0]
|
| 444 |
+
W = self.input_nodes[1]
|
| 445 |
+
accumulator_torch_dtype = cutlass_utils.get_accumulator_dtype(
|
| 446 |
+
[X.get_dtype(), W.get_dtype()],
|
| 447 |
+
)
|
| 448 |
+
if not (
|
| 449 |
+
cutlass_utils.dtype_match(X.get_dtype(), op.A.element)
|
| 450 |
+
and cutlass_utils.dtype_match(W.get_dtype(), op.B.element)
|
| 451 |
+
and cutlass_utils.dtype_match(
|
| 452 |
+
self.output_node.get_layout().dtype, op.C.element
|
| 453 |
+
)
|
| 454 |
+
and cutlass_utils.dtype_match(
|
| 455 |
+
accumulator_torch_dtype, op.accumulator_type()
|
| 456 |
+
)
|
| 457 |
+
):
|
| 458 |
+
return None
|
| 459 |
+
|
| 460 |
+
# Filter ops by input layouts.
|
| 461 |
+
if not (
|
| 462 |
+
self.layout_match(X.get_layout(), op.A.layout)
|
| 463 |
+
and self.layout_match(W.get_layout(), op.B.layout)
|
| 464 |
+
):
|
| 465 |
+
return None
|
| 466 |
+
|
| 467 |
+
# Update op.
|
| 468 |
+
op = copy.deepcopy(op)
|
| 469 |
+
|
| 470 |
+
# Set output layout.
|
| 471 |
+
op.D.layout = CUTLASSGemmTemplate.cutlass_layout(self.output_node.get_layout())
|
| 472 |
+
|
| 473 |
+
# Filter ops by alignments and set alignments.
|
| 474 |
+
if not (
|
| 475 |
+
self.set_alignment(X.get_layout(), op.A)
|
| 476 |
+
and self.set_alignment(W.get_layout(), op.B)
|
| 477 |
+
and self.set_alignment(self.output_node.get_layout(), op.D)
|
| 478 |
+
):
|
| 479 |
+
return None
|
| 480 |
+
|
| 481 |
+
# Set epilogue.
|
| 482 |
+
# TODO: update epilogue functor according to epilogues.
|
| 483 |
+
op.element_epilogue = op.accumulator_type()
|
| 484 |
+
|
| 485 |
+
# Set bias layout and alignment.
|
| 486 |
+
if len(self.input_nodes) >= 3 and self.input_nodes[2] is not None:
|
| 487 |
+
Bias = self.input_nodes[2]
|
| 488 |
+
bias_layout = CUTLASSGemmTemplate.cutlass_layout(Bias.get_layout())
|
| 489 |
+
if op.gemm_kind != cutlass_lib.GemmKind.Universal3x:
|
| 490 |
+
if bias_layout != op.D.layout:
|
| 491 |
+
# For cutlass2, bias and output layout must match
|
| 492 |
+
return None
|
| 493 |
+
else:
|
| 494 |
+
op.C.layout = bias_layout
|
| 495 |
+
if not self.set_alignment(Bias.get_layout(), op.C):
|
| 496 |
+
return None
|
| 497 |
+
else:
|
| 498 |
+
if op.gemm_kind == cutlass_lib.GemmKind.Universal3x:
|
| 499 |
+
op.C.element = cutlass_lib.DataType.void
|
| 500 |
+
else:
|
| 501 |
+
op.C.layout = op.D.layout
|
| 502 |
+
supports_evt: bool = self.supports_evt(op)
|
| 503 |
+
if (self.can_fuse_epilogue is not None) and (
|
| 504 |
+
self.can_fuse_epilogue != supports_evt
|
| 505 |
+
):
|
| 506 |
+
return None
|
| 507 |
+
if inductor_cuda_config.cutlass_only_evt_capable_ops and not supports_evt:
|
| 508 |
+
return None
|
| 509 |
+
return op
|
| 510 |
+
|
| 511 |
+
def gen_ops(self) -> "List[cutlass_gemm_op.GemmOperation]": # type: ignore[name-defined]
|
| 512 |
+
assert cutlass_utils.try_import_cutlass()
|
| 513 |
+
import cutlass_library.gemm_operation as cutlass_gemm_op
|
| 514 |
+
import cutlass_library.library as cutlass_lib
|
| 515 |
+
|
| 516 |
+
ops = cutlass_utils.gen_ops()[cutlass_lib.OperationKind.Gemm]
|
| 517 |
+
res: Dict[str, cutlass_gemm_op.GemmOperation] = dict()
|
| 518 |
+
num_3x_ops = 0
|
| 519 |
+
num_2x_ops = 0
|
| 520 |
+
for op_dict in ops.values():
|
| 521 |
+
for op_list in op_dict.values():
|
| 522 |
+
for op in op_list:
|
| 523 |
+
assert isinstance(op, cutlass_gemm_op.GemmOperation)
|
| 524 |
+
filter_res = self.filter_op(op)
|
| 525 |
+
if (
|
| 526 |
+
filter_res is not None
|
| 527 |
+
and res.get(filter_res.configuration_name(), None) is None
|
| 528 |
+
):
|
| 529 |
+
res[filter_res.configuration_name()] = filter_res
|
| 530 |
+
for op in res.values():
|
| 531 |
+
if op.gemm_kind == cutlass_lib.GemmKind.Universal3x:
|
| 532 |
+
num_3x_ops += 1
|
| 533 |
+
else:
|
| 534 |
+
num_2x_ops += 1
|
| 535 |
+
log.debug(
|
| 536 |
+
"Got cutlass configs: total number of ops: %d, "
|
| 537 |
+
"total number of 3x ops: %d, total number of 2x ops: %d",
|
| 538 |
+
len(res),
|
| 539 |
+
num_3x_ops,
|
| 540 |
+
num_2x_ops,
|
| 541 |
+
)
|
| 542 |
+
return list(res.values())[: inductor_cuda_config.cutlass_max_profiling_configs]
|
| 543 |
+
|
| 544 |
+
def gemm_mode(self) -> str:
|
| 545 |
+
sizes = self.output_node.get_size()
|
| 546 |
+
if len(sizes) > 2:
|
| 547 |
+
return "cutlass::gemm::GemmUniversalMode::kBatched"
|
| 548 |
+
else:
|
| 549 |
+
return "cutlass::gemm::GemmUniversalMode::kGemm"
|
| 550 |
+
|
| 551 |
+
def render_gemm_arguments(
|
| 552 |
+
self,
|
| 553 |
+
argument_template: str,
|
| 554 |
+
epilogue_template: str,
|
| 555 |
+
should_swap_xw: bool,
|
| 556 |
+
X: IRNode,
|
| 557 |
+
W: IRNode,
|
| 558 |
+
Bias: IRNode,
|
| 559 |
+
Y: IRNode,
|
| 560 |
+
alpha: float,
|
| 561 |
+
beta: float,
|
| 562 |
+
kernel: CUDATemplateKernel,
|
| 563 |
+
epilogue_args,
|
| 564 |
+
) -> str:
|
| 565 |
+
options = dict(
|
| 566 |
+
alpha=self.alpha,
|
| 567 |
+
beta=self.beta,
|
| 568 |
+
X=X,
|
| 569 |
+
W=W,
|
| 570 |
+
Y=Y,
|
| 571 |
+
Bias=Bias,
|
| 572 |
+
template=self,
|
| 573 |
+
kernel=kernel,
|
| 574 |
+
M="M",
|
| 575 |
+
N="N",
|
| 576 |
+
epilogue_args=epilogue_args,
|
| 577 |
+
)
|
| 578 |
+
|
| 579 |
+
if epilogue_template is not None:
|
| 580 |
+
if should_swap_xw:
|
| 581 |
+
# Swap
|
| 582 |
+
def clone_with_transposed_stride(node: IRNode) -> IRNode:
|
| 583 |
+
old_layout = node.get_layout()
|
| 584 |
+
new_stride = list(old_layout.stride)
|
| 585 |
+
new_stride[-2], new_stride[-1] = new_stride[-1], new_stride[-2]
|
| 586 |
+
new_layout = FixedLayout(
|
| 587 |
+
old_layout.device,
|
| 588 |
+
old_layout.dtype,
|
| 589 |
+
list(old_layout.size),
|
| 590 |
+
new_stride,
|
| 591 |
+
old_layout.offset,
|
| 592 |
+
)
|
| 593 |
+
return Buffer(node.get_name(), new_layout)
|
| 594 |
+
|
| 595 |
+
new_X = clone_with_transposed_stride(X)
|
| 596 |
+
new_W = clone_with_transposed_stride(W)
|
| 597 |
+
new_Bias = clone_with_transposed_stride(Bias)
|
| 598 |
+
new_Y = clone_with_transposed_stride(Y)
|
| 599 |
+
options["X"], options["W"], options["Bias"], options["Y"] = (
|
| 600 |
+
new_W,
|
| 601 |
+
new_X,
|
| 602 |
+
new_Bias,
|
| 603 |
+
new_Y,
|
| 604 |
+
)
|
| 605 |
+
options["M"], options["N"] = "N", "M"
|
| 606 |
+
|
| 607 |
+
epilogue_arguments = self._template_from_string(epilogue_template).render(
|
| 608 |
+
**options
|
| 609 |
+
)
|
| 610 |
+
arguments = self._template_from_string(argument_template).render(
|
| 611 |
+
epilogue_arguments=epilogue_arguments, **options
|
| 612 |
+
)
|
| 613 |
+
else:
|
| 614 |
+
arguments = self._template_from_string(GEMM_ARGS_CUTLASS_2X).render(
|
| 615 |
+
split_k=1, **options
|
| 616 |
+
)
|
| 617 |
+
return arguments
|
| 618 |
+
|
| 619 |
+
def render(  # type: ignore[override]
    self,
    kernel: CUDATemplateKernel,
    op: "cutlass_gemm_op.GemmOperation" = None,  # type: ignore[name-defined]
    template_buffer_node: Optional[CUDATemplateBuffer] = None,
    epilogue_nodes: Optional[List[IRNode]] = None,
    **kwargs,
) -> str:
    """Render the complete CUDA source for one CUTLASS GEMM instance.

    Validates ``op`` and any requested EVT epilogue fusion, optionally swaps
    X/W for TMA-epilogue performance, then renders ``GEMM_TEMPLATE`` with the
    instance definition produced by ``define_gemm_instance``.
    """
    if epilogue_nodes is not None and len(epilogue_nodes) > 0:
        # Epilogue fusion is only valid for ops that support EVT and when the
        # template buffer (the matmul's own output) is known by name.
        assert self.can_fuse_epilogue and CUTLASSGemmTemplate.supports_evt(
            op
        ), "op does not support EVT epilogue fusion"
        assert (
            template_buffer_node is not None
        ), "Template node is required for epilogue fusion"
        assert isinstance(
            template_buffer_node, CUDATemplateBuffer
        ), f"Template node has to be a CUDATemplateBuffer, is type {type(template_buffer_node)}"
        assert (
            template_buffer_node.name is not None
        ), "Output node has to be a Buffer with a name"
        # This is the name of the output of the Matmul, before epilogues are applied.
        # it is not necessarily materialized in global memory if we have an epilogue

    template_output_node_name = (
        template_buffer_node.name if template_buffer_node is not None else None
    )

    # cutlass_library is imported lazily; try_import_cutlass sets it up.
    assert cutlass_utils.try_import_cutlass()
    import cutlass_library.gemm_operation as cutlass_gemm_op
    import cutlass_library.library as cutlass_lib

    assert isinstance(
        op, cutlass_gemm_op.GemmOperation
    ), "op argument is required and has to be an instance of GemmOperation"
    # With an epilogue, the final epilogue node (not the matmul buffer)
    # becomes this template's output node.
    if template_buffer_node is not None:
        self.output_node = template_buffer_node
    if epilogue_nodes is not None and len(epilogue_nodes) > 0:
        self.output_node = cast(Buffer, epilogue_nodes[-1])

    assert len(self.input_nodes) >= 2 and self.output_node is not None
    X, W = self.input_nodes[0], self.input_nodes[1]
    Y = self.output_node
    # Optional third input is the bias.
    Bias = None if len(self.input_nodes) == 2 else self.input_nodes[2]

    epilogue_template: Optional[str] = None
    should_swap_xw: bool = False
    # Default epilogue arguments: plain {alpha, beta} initializer.
    epilogue_args = f"{{ElementComputeEpilogue({self.alpha}), ElementComputeEpilogue({self.beta})}}"
    if op.gemm_kind == cutlass_lib.GemmKind.Universal3x:
        if Bias is not None and self.has_tma_epilogue(op):
            if self.should_swap_XW(Bias, self.beta):
                # TMA epilogue requires bias vector in column major to get best perf.
                op = self.swap_XW(op)
                should_swap_xw = True
        if epilogue_nodes is not None and len(epilogue_nodes) > 0:
            # Replace the default args with the fused EVT argument string.
            epilogue_args = (
                CutlassEVTEpilogueArgumentFormatter.ir_to_evt_argument_string(
                    cast(str, template_output_node_name), epilogue_nodes
                )
            )
        epilogue_template = GEMM_ARGS_CUTLASS_3X_EPILOGUE
        argument_template = GEMM_ARGS_CUTLASS_3X
    else:
        # TODO: Support split_k.
        argument_template = GEMM_ARGS_CUTLASS_2X

    instance_definition, instance_type = self.define_gemm_instance(
        op, cast(str, template_output_node_name), epilogue_nodes
    )
    # Jinja context for the top-level GEMM_TEMPLATE.
    options = dict(
        alpha=self.alpha,
        beta=self.beta,
        X=X,
        W=W,
        Y=Y,
        Bias=Bias,
        epilogue_template=epilogue_template,
        argument_template=argument_template,
        should_swap_xw=should_swap_xw,
        template=self,
        kernel=kernel,
        instance_definition=instance_definition,
        instance_type=instance_type,
        input_reorder=self.input_reorder,
        epilogue_args=epilogue_args,
    )
    res = self._template_from_string(GEMM_TEMPLATE).render(**options)
    return res
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/triton.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/triton_foreach.py
ADDED
|
@@ -0,0 +1,249 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import itertools
|
| 2 |
+
from collections import defaultdict
|
| 3 |
+
from dataclasses import dataclass
|
| 4 |
+
from typing import Dict, List, Tuple
|
| 5 |
+
|
| 6 |
+
from sympy import Integer
|
| 7 |
+
|
| 8 |
+
from .. import metrics
|
| 9 |
+
from ..scheduler import SchedulerNode
|
| 10 |
+
from ..utils import ceildiv, Placeholder
|
| 11 |
+
from ..virtualized import V
|
| 12 |
+
from .common import IndentedBuffer, Kernel
|
| 13 |
+
from .triton import TritonKernel
|
| 14 |
+
from .triton_utils import config_of, signature_to_meta
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
@dataclass
class PartitionState:
    """Accumulator used by ForeachKernel.horizontal_partition.

    Collects node-info tuples into the current partition until the arg limit
    is hit, at which point the partition is flushed into ``partitions``.
    """

    # Completed partitions of (fused_nodes, tiling, numel, rnumel) tuples.
    partitions: List[
        List[Tuple[List[SchedulerNode], Tuple[Integer, ...], Integer, Integer]]
    ]
    # Partition currently being filled.
    cur_partition: List[
        Tuple[List[SchedulerNode], Tuple[Integer, ...], Integer, Integer]
    ]
    # Running read/write-arg count of cur_partition.
    cur_count: int

    def finalize(self):
        # Flush any non-empty in-progress partition into the finished list.
        if self.cur_partition:
            self.partitions.append(self.cur_partition)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class ForeachKernel(Kernel):
    """Horizontally fuses multiple Triton sub-kernels into one launched kernel.

    Each sub-kernel is guarded by a program-id range check so distinct
    element-wise ops share a single grid launch.
    """

    MAX_NUM_ARGS = 250  # number where I would no longer get triton errors

    @staticmethod
    def _update_partition(partition_state, node_rw_count, node_info):
        # Start a new partition when adding this node would exceed the
        # kernel-argument budget; otherwise append to the current one.
        if partition_state.cur_count + node_rw_count > ForeachKernel.MAX_NUM_ARGS:
            partition_state.partitions.append(partition_state.cur_partition)
            partition_state.cur_partition = [node_info]
            partition_state.cur_count = node_rw_count
        else:
            partition_state.cur_count += node_rw_count
            partition_state.cur_partition.append(node_info)

    @staticmethod
    def horizontal_partition(subkernel_nodes, triton_scheduling):
        """Generates a list of lists of node info tuples which consist of (fused_nodes, tiling, numel, rnumel)
        for each subkernel node where each sublist is guaranteed to not exceed CUDA limits for number of args
        (read/writes) and to have the same 2D or 1D blocking strategy."""
        assert len(subkernel_nodes) >= 1

        # 1D nodes share one partition stream; 2D nodes are bucketed by their
        # y-extent so every partition has a uniform blocking strategy.
        partition_state_1d = PartitionState([], [], 0)
        yelem_to_partition_state_2d: Dict[Integer, PartitionState] = defaultdict(
            lambda: PartitionState([], [], 0)
        )

        for node in subkernel_nodes:
            fused_nodes = node.get_nodes()
            # Take (numel, rnumel) from the reduction node if any is present.
            _, (numel, rnumel) = max(
                fused_nodes, key=lambda x: int(x.is_reduction())
            ).group
            tiled_groups = triton_scheduling.select_tiling(fused_nodes, numel, rnumel)
            node_info = fused_nodes, tiled_groups, numel, rnumel

            read_writes = node.read_writes
            read_write_count = len(read_writes.reads) + len(read_writes.writes)

            # tiled_groups[1] == 1 means no second tiling dim, i.e. 1D blocking.
            if tiled_groups[1] == 1:
                ForeachKernel._update_partition(
                    partition_state_1d, read_write_count, node_info
                )
            else:
                y_elem = tiled_groups[0]
                partition_state_2d = yelem_to_partition_state_2d[y_elem]
                ForeachKernel._update_partition(
                    partition_state_2d, read_write_count, node_info
                )

        # Flush all in-progress partitions and collect the results.
        partition_state_1d.finalize()
        all_partitions = partition_state_1d.partitions
        for partition_state_2d in yelem_to_partition_state_2d.values():
            partition_state_2d.finalize()
            all_partitions.extend(partition_state_2d.partitions)

        return all_partitions

    def __init__(self):
        super().__init__()
        self.blocking_2d = False
        self.block_size_1d = 1024  # Try tuning this value
        self.block_size_2d = 32
        self.num_warps = 8
        self.sub_kernels = []
        # Shared counters so sub-kernels generate non-colliding names.
        self.iter_vars_count = itertools.count()
        self.x_block_count = 0
        self.y_block_count = 0

    def get_block_size(self):
        # Block size along x depends on the chosen blocking strategy.
        if self.blocking_2d:
            return self.block_size_2d
        else:
            return self.block_size_1d

    @staticmethod
    def codegen_pid_offsets(code, block_count, lower_bound, prefix):
        # Emit the pid -> local-offset mapping; skip the subtraction when the
        # sub-kernel's range starts at block 0.
        if block_count == 0:
            code.splice(f"{prefix}pid_offset = {prefix}pid")
        else:
            code.splice(f"{prefix}pid_offset = {prefix}pid - {lower_bound}")

    def codegen_pid_range(self, code, x_elems):
        """Emit the if/elif guard selecting this sub-kernel's xpid range."""
        num_x_blocks = ceildiv(x_elems, self.get_block_size())
        upper_bound_x_pid = self.x_block_count + num_x_blocks
        lower_bound_x_pid = self.x_block_count

        # First sub-kernel opens the chain with `if`; later ones use `elif`.
        if self.x_block_count == 0:
            cond = "if"
        else:
            cond = "elif"

        x_pid_bounds_check = (
            f"xpid >= {lower_bound_x_pid} and xpid < {upper_bound_x_pid}"
        )
        code.splice(f"{cond} {x_pid_bounds_check}:")

        with code.indent():
            ForeachKernel.codegen_pid_offsets(
                code, num_x_blocks, lower_bound_x_pid, "x"
            )
            self.x_block_count += num_x_blocks

    def create_sub_kernel(self, *groups, index_dtype, mutations, reduction_hint):
        """Create a TritonKernel sharing this kernel's args/name counters."""
        sub_kernel = TritonKernel(
            *groups,
            index_dtype=index_dtype,
            mutations=mutations,
            # Remap program ids so sub-kernel bodies use the offsets computed
            # by codegen_pid_range instead of raw program ids.
            pid_cache={
                "tl.program_id(0)": "xpid_offset",
                "tl.program_id(1)": "ypid",
            },
            reduction_hint=reduction_hint,
        )
        if self.blocking_2d:
            assert len(groups) == 3

        # A 3-group sub-kernel with a non-trivial second dim forces 2D blocking.
        self.blocking_2d |= groups[1] != 1 and len(groups) == 3
        # Sub-kernels are not emitted standalone; undo the metric bump.
        metrics.generated_kernel_count -= 1
        sub_kernel.args = self.args
        sub_kernel.iter_vars_count = self.iter_vars_count
        sub_kernel.cse.iter_buffer_ids = self.cse.iter_buffer_ids
        self.sub_kernels.append(sub_kernel)
        return sub_kernel

    def jit_line(self):
        """Build the @foreach / @triton.jit decorator lines for codegen."""
        # 32-bit indexing is only safe if every sub-kernel can use it.
        can_use_32bit = all(k.index_dtype == "tl.int32" for k in self.sub_kernels)
        size_dtype = "tl.int32" if can_use_32bit else "tl.int64"
        _, _, signature = self.args.python_argdefs()
        triton_meta = {
            "signature": signature_to_meta(signature, size_dtype=size_dtype),
            "device": V.graph.scheduler.current_device.index,
            "device_type": V.graph.scheduler.current_device.type,
            "constants": {},
        }
        triton_meta["configs"] = [config_of(signature)]
        inductor_meta = {"kernel_name": str(Placeholder.DESCRIPTIVE_NAME)}
        return (
            f"@foreach(num_warps={self.num_warps}, triton_meta={triton_meta!r}, inductor_meta={inductor_meta!r})\n"
            + "@triton.jit"
        )

    def grid(self):
        # (total x blocks, y blocks for 2D blocking else 1, 1).
        return (
            self.x_block_count,
            ceildiv(int(self.sub_kernels[0].numels[0]), self.block_size_2d)
            if self.blocking_2d
            else 1,
            1,
        )

    def codegen_kernel(self, name=None):
        """Render the combined Triton kernel source for all sub-kernels."""
        code = IndentedBuffer()

        code.splice(
            """
                import triton
                import triton.language as tl
                from torch._inductor.triton_heuristics import foreach
                from torch._inductor.utils import instance_descriptor
                from torch._inductor import triton_helpers
            """
        )
        argdefs, _, _ = self.args.python_argdefs()
        code.writeline(self.jit_line())
        code.writeline(
            f"def {name or str(Placeholder.KERNEL_NAME)}({', '.join(argdefs)}):"
        )

        with code.indent():
            code.splice("xpid = tl.program_id(0)")
            if self.blocking_2d:
                code.splice("ypid = tl.program_id(1)")
                code.splice(f"XBLOCK: tl.constexpr = {self.block_size_2d}")
                code.splice(f"YBLOCK: tl.constexpr = {self.block_size_2d}")
            else:
                code.splice(f"XBLOCK: tl.constexpr = {self.block_size_1d}")

            # Emit each sub-kernel body guarded by its xpid range.
            for sub_kernel in self.sub_kernels:
                assert len(sub_kernel.numels) <= 3
                # TODO mlazos: support dynamic shapes
                numel_ind = 0 if not self.blocking_2d else 1
                self.codegen_pid_range(code, int(sub_kernel.numels[numel_ind]))
                with code.indent():
                    if self.blocking_2d:
                        code.splice(f"ynumel = {sub_kernel.numels[0]}")
                        code.splice(f"xnumel = {sub_kernel.numels[1]}")
                    else:
                        code.splice(f"xnumel = {sub_kernel.numels[0]}")

                    sub_kernel.codegen_body()
                    code.splice(sub_kernel.body)

            # Close the if/elif chain: out-of-range program ids do nothing.
            code.splice("else:")
            with code.indent():
                code.splice("pass")

        return code.getvalue()

    def call_kernel(self, code, name: str):
        """Emit the Python (or cpp-wrapper) call site for this kernel."""
        _, call_args, _ = self.args.python_argdefs()
        # dynamo wraps unspec variable as 0d CPU tensor, need convert to scalar
        for i in range(len(call_args)):
            if V.graph.is_unspec_arg(call_args[i]):
                call_args[i] = call_args[i] + ".item()"
        if V.graph.cpp_wrapper:
            V.graph.wrapper_code.generate_kernel_call(
                name,
                call_args,
                device_index=V.graph.scheduler.current_device.index,
                grid=self.grid(),
            )
        else:
            # TODO: refactor generate_kernel_call
            call_args_str = ", ".join(call_args)
            stream_name = code.write_get_raw_stream(
                V.graph.scheduler.current_device.index
            )
            code.writeline(
                f"{name}.run({call_args_str}, grid=({self.grid()}), stream={stream_name})"
            )
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/codegen/wrapper.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
from . import mm, mm_common, mm_plus_mm, unpack_mixed_mm
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (275 Bytes). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/bmm.cpython-310.pyc
ADDED
|
Binary file (3.9 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/conv.cpython-310.pyc
ADDED
|
Binary file (11.2 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/mm_common.cpython-310.pyc
ADDED
|
Binary file (5.4 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/mm_plus_mm.cpython-310.pyc
ADDED
|
Binary file (5.26 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/unpack_mixed_mm.cpython-310.pyc
ADDED
|
Binary file (3 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/conv.py
ADDED
|
@@ -0,0 +1,489 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import functools
|
| 4 |
+
import logging
|
| 5 |
+
from typing import cast, List, Optional, Sequence, Tuple, TypedDict
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
from .. import config, ir
|
| 9 |
+
from ..ir import TensorBox
|
| 10 |
+
|
| 11 |
+
from ..lowering import (
|
| 12 |
+
add_layout_constraint,
|
| 13 |
+
constrain_to_fx_strides,
|
| 14 |
+
lowerings as L,
|
| 15 |
+
register_lowering,
|
| 16 |
+
)
|
| 17 |
+
from ..select_algorithm import (
|
| 18 |
+
autotune_select_algorithm,
|
| 19 |
+
ExternKernelChoice,
|
| 20 |
+
TritonTemplate,
|
| 21 |
+
)
|
| 22 |
+
from ..utils import (
|
| 23 |
+
ceildiv,
|
| 24 |
+
is_ones,
|
| 25 |
+
is_zeros,
|
| 26 |
+
pad_listlike,
|
| 27 |
+
sympy_product,
|
| 28 |
+
use_triton_template,
|
| 29 |
+
)
|
| 30 |
+
from ..virtualized import V
|
| 31 |
+
from .mm_common import filtered_configs
|
| 32 |
+
|
| 33 |
+
log = logging.getLogger(__name__)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
aten = torch.ops.aten
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def conv_grid(n, c, h, w, meta):
    """Launch grid for the convolution Triton template.

    One program per (BLOCK_M chunk of the n*h*w output positions,
    BLOCK_N chunk of output channels, group).
    """
    spatial_blocks = ceildiv(n * h * w, meta["BLOCK_M"])
    channel_blocks = ceildiv(c, meta["BLOCK_N"])
    return (spatial_blocks, channel_blocks, meta["GROUPS"])
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
# List of dictionaries to store the kernel configs. Configs that evaluate to true
# will be utilised on the target platform
kernel_configs = [
    # "BLOCK_M", "BLOCK_N", "BLOCK_K", "num_stages", "num_warps"
    {"config": (64, 256, 16, 2, 4), "cond": True},
    {"config": (256, 64, 16, 2, 4), "cond": True},
    {"config": (1024, 16, 16, 1, 8), "cond": True},
    {"config": (128, 128, 32, 2, 8), "cond": True},
    {"config": (64, 64, 32, 2, 4), "cond": True},
    {"config": (64, 256, 32, 2, 8), "cond": True},
    {"config": (256, 64, 32, 2, 8), "cond": True},
]

# Create filtered list of configs based on conv
platform_configs = tuple(
    cast(Tuple[int, int, int, int, int], config["config"])
    for config in kernel_configs
    if config["cond"]
)

# On ROCm convert num_stages to 1 as pipelining provides no benefit
if torch.version.hip:
    platform_configs = tuple(
        (config[0], config[1], config[2], 1, config[4]) for config in platform_configs
    )

# Partial of the shared mm config filter, pre-bound to the conv config set;
# callers supply the remaining (shape-dependent) arguments.
conv_configs = functools.partial(
    filtered_configs,
    configs=platform_configs,
)
|
| 77 |
+
|
| 78 |
+
LOOP_BODY = """
|
| 79 |
+
idx_x_h = i - PADDING_H + idx_y_h * STRIDE_H
|
| 80 |
+
idx_x_w = j - PADDING_W + idx_y_w * STRIDE_W
|
| 81 |
+
idx_x_c = tl.arange(0, BLOCK_K) + k
|
| 82 |
+
|
| 83 |
+
x_ptrs = x_base + (
|
| 84 |
+
(idx_x_h * stride_xh)[:, None]
|
| 85 |
+
+ (idx_x_w * stride_xw)[:, None]
|
| 86 |
+
+ (idx_x_c * stride_xc)[None, :]
|
| 87 |
+
)
|
| 88 |
+
mask_x = (
|
| 89 |
+
(idx_n < BATCH)[:, None]
|
| 90 |
+
& (idx_x_h >= 0)[:, None]
|
| 91 |
+
& (idx_x_h < IN_H)[:, None]
|
| 92 |
+
& (idx_x_w >= 0)[:, None]
|
| 93 |
+
& (idx_x_w < IN_W)[:, None]
|
| 94 |
+
& (idx_x_c < GROUP_IN_C)[None, :]
|
| 95 |
+
)
|
| 96 |
+
matrix_x = tl.load(x_ptrs, mask=mask_x, other=0.0)
|
| 97 |
+
|
| 98 |
+
w_ptrs = w_base + (
|
| 99 |
+
(idx_x_c * stride_wc_in)[:, None] + (i * stride_wh) + (j * stride_ww)
|
| 100 |
+
)
|
| 101 |
+
mask_w = (idx_x_c[:, None] < GROUP_IN_C) & (idx_y_c[None, :] < GROUP_OUT_C)
|
| 102 |
+
matrix_w = tl.load(w_ptrs, mask=mask_w, other=0.0)
|
| 103 |
+
acc += tl.dot(matrix_x, matrix_w, allow_tf32=ALLOW_TF32)
|
| 104 |
+
"""
|
| 105 |
+
|
| 106 |
+
"""
|
| 107 |
+
This is a relatively simple conv implementation that can likely be
|
| 108 |
+
improved. Many alternate conv versions can be found here:
|
| 109 |
+
https://github.com/pytorch/torchdynamo/pull/971
|
| 110 |
+
"""
|
| 111 |
+
conv2d_template = TritonTemplate(
|
| 112 |
+
name="convolution",
|
| 113 |
+
grid=conv_grid,
|
| 114 |
+
source=r"""
|
| 115 |
+
{{def_kernel("X", "W")}}
|
| 116 |
+
# Tensor dimensions
|
| 117 |
+
BATCH = {{size("X", 0)}}
|
| 118 |
+
IN_C = {{size("X", 1)}}
|
| 119 |
+
IN_H = {{size("X", 2)}}
|
| 120 |
+
IN_W = {{size("X", 3)}}
|
| 121 |
+
OUT_C = {{size(None, 1)}}
|
| 122 |
+
OUT_H = {{size(None, 2)}}
|
| 123 |
+
OUT_W = {{size(None, 3)}}
|
| 124 |
+
|
| 125 |
+
# Strides:
|
| 126 |
+
stride_xn = {{stride("X", 0)}}
|
| 127 |
+
stride_xc = {{stride("X", 1)}}
|
| 128 |
+
stride_xh = {{stride("X", 2)}}
|
| 129 |
+
stride_xw = {{stride("X", 3)}}
|
| 130 |
+
stride_wc_out = {{stride("W", 0)}}
|
| 131 |
+
stride_wc_in = {{stride("W", 1)}}
|
| 132 |
+
stride_wh = {{stride("W", 2)}}
|
| 133 |
+
stride_ww = {{stride("W", 3)}}
|
| 134 |
+
|
| 135 |
+
nhw = tl.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M)
|
| 136 |
+
idx_y_w = nhw % OUT_W
|
| 137 |
+
nh = nhw // OUT_W
|
| 138 |
+
idx_y_h = nh % OUT_H
|
| 139 |
+
idx_n = nh // OUT_H
|
| 140 |
+
idx_y_c = tl.program_id(1) * BLOCK_N + tl.arange(0, BLOCK_N)
|
| 141 |
+
|
| 142 |
+
{% if GROUPS == 1 %}
|
| 143 |
+
group = 0
|
| 144 |
+
GROUP_IN_C = IN_C
|
| 145 |
+
GROUP_OUT_C = OUT_C
|
| 146 |
+
{% else %}
|
| 147 |
+
group = tl.program_id(2)
|
| 148 |
+
GROUP_IN_C = IN_C // GROUPS
|
| 149 |
+
GROUP_OUT_C = OUT_C // GROUPS
|
| 150 |
+
{% endif %}
|
| 151 |
+
|
| 152 |
+
x_base = X + (group * stride_xc * GROUP_IN_C + idx_n * stride_xn)[:, None]
|
| 153 |
+
w_base = (
|
| 154 |
+
W + (group * stride_wc_out * GROUP_OUT_C + idx_y_c * stride_wc_out)[None, :]
|
| 155 |
+
)
|
| 156 |
+
|
| 157 |
+
acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.float32)
|
| 158 |
+
|
| 159 |
+
{% if UNROLL %}
|
| 160 |
+
{% for i in range(KERNEL_H) %}
|
| 161 |
+
{% for j in range(KERNEL_W) %}
|
| 162 |
+
i = {{i}}
|
| 163 |
+
j = {{j}}
|
| 164 |
+
for k in range(0, GROUP_IN_C, BLOCK_K):
|
| 165 |
+
"""
|
| 166 |
+
+ LOOP_BODY
|
| 167 |
+
+ """
|
| 168 |
+
{% endfor %}
|
| 169 |
+
{% endfor %}
|
| 170 |
+
{% else %}
|
| 171 |
+
# Could be simplified, but slightly slower:
|
| 172 |
+
# for i in range(KERNEL_H):
|
| 173 |
+
# for j in range(KERNEL_W):
|
| 174 |
+
# for k in range(0, GROUP_IN_C, BLOCK_K):
|
| 175 |
+
BLOCK_K_COUNT = (GROUP_IN_C + BLOCK_K - 1) // BLOCK_K
|
| 176 |
+
for ijk in range(KERNEL_H * KERNEL_W * BLOCK_K_COUNT):
|
| 177 |
+
k = (ijk % BLOCK_K_COUNT) * BLOCK_K
|
| 178 |
+
ij = ijk // BLOCK_K_COUNT
|
| 179 |
+
i = ij // KERNEL_W
|
| 180 |
+
j = ij % KERNEL_W
|
| 181 |
+
"""
|
| 182 |
+
+ LOOP_BODY
|
| 183 |
+
+ """
|
| 184 |
+
{% endif %}
|
| 185 |
+
|
| 186 |
+
mask = (
|
| 187 |
+
(idx_n < BATCH)[:, None]
|
| 188 |
+
& (idx_y_h < OUT_H)[:, None]
|
| 189 |
+
& (idx_y_w < OUT_W)[:, None]
|
| 190 |
+
& (idx_y_c < GROUP_OUT_C)[None, :]
|
| 191 |
+
)
|
| 192 |
+
idx_n = idx_n[:, None]
|
| 193 |
+
idx_c = idx_y_c[None, :] + group * GROUP_OUT_C
|
| 194 |
+
idx_h = idx_y_h[:, None]
|
| 195 |
+
idx_w = idx_y_w[:, None]
|
| 196 |
+
|
| 197 |
+
# inductor generates a suffix
|
| 198 |
+
{{store_output(("idx_n", "idx_c", "idx_h", "idx_w"), "acc", "mask")}}
|
| 199 |
+
""",
|
| 200 |
+
)
|
| 201 |
+
|
| 202 |
+
# ATen fallback choice for autotuning: dispatches to at::convolution
# (no out= variant available).
aten_convolution = ExternKernelChoice(
    torch.convolution,
    "at::convolution",
    has_out_variant=False,
)
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
def conv1x1_via_mm(x, w, *, out):
    """Compute a 1x1 convolution as a matmul over the channel dimension.

    x is (N, C_in, H, W) and w is (C_out, C_in, 1, 1); the product is written
    into *out* (shape (N, C_out, H, W)) through its channels-last view.
    """
    weight_2d = torch.squeeze(torch.squeeze(w, -1), -1)
    x_nhwc = x.permute(0, 2, 3, 1)
    out_nhwc = out.permute(0, 2, 3, 1)
    return torch.matmul(x_nhwc, weight_2d.permute(1, 0), out=out_nhwc)
|
| 214 |
+
|
| 215 |
+
|
| 216 |
+
# Extern-kernel choice wrapping the 1x1-conv-as-matmul fallback above.
aten_conv1x1_via_mm = ExternKernelChoice(conv1x1_via_mm, None)
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
class ConvLayoutParams(TypedDict):
    """Keyword arguments shared by the convolution lowering helpers."""

    stride: tuple[int, ...]
    padding: tuple[int, ...]
    dilation: tuple[int, ...]
    transposed: bool
    output_padding: tuple[int, ...]
    groups: int
|
| 227 |
+
|
| 228 |
+
def conv_layout(
    x: TensorBox,
    weight: TensorBox,
    bias: Optional[TensorBox],
    stride: Sequence[int],
    padding: tuple[int, ...],
    dilation: tuple[int, ...],
    transposed: bool,
    output_padding: tuple[int, ...],
    groups: int,
) -> ir.Layout:
    """Determine output layout for a convolution.

    Runs aten.convolution under the graph's fake mode to get the output's
    size/stride, then returns a FixedLayout with the input's device/dtype.
    """
    with V.graph.fake_mode:
        output = torch.ops.aten.convolution(
            ir.ir_node_to_tensor(x, guard_shape=True),
            ir.ir_node_to_tensor(weight, guard_shape=True),
            ir.ir_node_to_tensor(bias, guard_shape=True),
            stride,
            # padding/output_padding may be symbolic; resolve to size hints.
            tuple(V.graph.sizevars.size_hint(p) for p in padding),
            dilation,
            transposed,
            tuple(V.graph.sizevars.size_hint(p) for p in output_padding),
            groups,
        )
        sizes = ir.convert_shape_to_inductor(output.size())
        stride = ir.convert_shape_to_inductor(output.stride())

    return ir.FixedLayout(
        x.get_device(),
        x.get_dtype(),
        sizes,
        stride,
    )
|
| 261 |
+
|
| 262 |
+
|
| 263 |
+
def channels_last_order(rank):
    """Return the channels-last stride order for a tensor of this rank.

    e.g. rank 4 -> [3, 0, 2, 1]: batch/channel dims reordered so the channel
    dimension is second in the stride order.
    """
    descending = list(range(rank - 1, -1, -1))
    channel_dim = descending.pop()
    descending.insert(1, channel_dim)
    return descending
|
| 267 |
+
|
| 268 |
+
|
| 269 |
+
def convert_1x1_conv_to_mm(x, weight, bias):
    # special case for 1x1 convolution, which is actually just a matmul
    rank = len(weight.get_size())
    # Drop the trailing 1x1 (or 1x..x1) kernel dims, then transpose to
    # (C_in, C_out) for the matmul.
    for _ in range(rank - 2):
        weight = L[aten.squeeze](weight, dim=-1)
    weight = L[aten.permute](weight, [1, 0])

    if x.get_size()[0] != 1:
        # Force channels-last so the channel dim is contiguous for the mm.
        x = ir.ExternKernel.require_stride_order(x, channels_last_order(rank))
    else:
        # Batch of 1: any layout works; just pin the current one.
        x.realize()
        x.freeze_layout()

    # Move the channel dim last, flatten the rest, and run (add)mm.
    x_permute = list(range(rank))
    x_permute.append(x_permute.pop(1))
    x = L[aten.permute](x, x_permute)
    *sizes, in_chan = x.get_size()
    x = L[aten.reshape](x, [sympy_product(sizes), in_chan])
    if bias is None:
        result = L[aten.mm](x, weight)
    else:
        result = L[aten.addmm](bias, x, weight)
    # Restore the original (N, C_out, ...) dimension order.
    result = L[aten.reshape](result, [*sizes, -1])
    result_permute = list(range(rank))
    result_permute.insert(1, result_permute.pop(-1))
    return L[aten.permute](result, result_permute)
|
| 295 |
+
|
| 296 |
+
|
| 297 |
+
@register_lowering(aten.convolution)
|
| 298 |
+
def convolution(
|
| 299 |
+
x: TensorBox,
|
| 300 |
+
weight: TensorBox,
|
| 301 |
+
bias: TensorBox,
|
| 302 |
+
stride: List[int],
|
| 303 |
+
padding: List[int],
|
| 304 |
+
dilation: List[int],
|
| 305 |
+
transposed: bool,
|
| 306 |
+
output_padding: List[int],
|
| 307 |
+
groups: int,
|
| 308 |
+
):
|
| 309 |
+
stride = tuple(stride)
|
| 310 |
+
padding = tuple(padding)
|
| 311 |
+
dilation = tuple(dilation)
|
| 312 |
+
output_padding = tuple(output_padding)
|
| 313 |
+
if not isinstance(groups, int):
|
| 314 |
+
groups = V.graph.sizevars.evaluate_static_shape(groups)
|
| 315 |
+
assert isinstance(groups, int)
|
| 316 |
+
kwargs: ConvLayoutParams = {
|
| 317 |
+
"stride": stride,
|
| 318 |
+
"padding": padding,
|
| 319 |
+
"dilation": dilation,
|
| 320 |
+
"transposed": transposed,
|
| 321 |
+
"output_padding": output_padding,
|
| 322 |
+
"groups": groups,
|
| 323 |
+
}
|
| 324 |
+
|
| 325 |
+
if len(x.get_size()) == len(weight.get_size()) - 1:
|
| 326 |
+
# add batch dimension to simplify rest of function
|
| 327 |
+
return L[aten.squeeze](
|
| 328 |
+
convolution(L[aten.expand](x, [1, *x.get_size()]), weight, bias, **kwargs),
|
| 329 |
+
dim=0,
|
| 330 |
+
)
|
| 331 |
+
|
| 332 |
+
out_chan, in_chan, *kernel_shape = V.graph.sizevars.evaluate_static_shapes(
|
| 333 |
+
weight.get_size()
|
| 334 |
+
)
|
| 335 |
+
ndim = len(kernel_shape)
|
| 336 |
+
stride = pad_listlike(stride, ndim)
|
| 337 |
+
padding = pad_listlike(padding, ndim)
|
| 338 |
+
dilation = pad_listlike(dilation, ndim)
|
| 339 |
+
output_padding = pad_listlike(output_padding, ndim)
|
| 340 |
+
|
| 341 |
+
def channels_last_conv():
|
| 342 |
+
if V.graph.layout_opt and ndim == 2:
|
| 343 |
+
return True
|
| 344 |
+
|
| 345 |
+
layout = conv_layout(x, weight, None, **kwargs)
|
| 346 |
+
req_stride_order = ir.get_stride_order(
|
| 347 |
+
V.graph.sizevars.size_hints(layout.stride)
|
| 348 |
+
)
|
| 349 |
+
return req_stride_order == ir.NHWC_STRIDE_ORDER
|
| 350 |
+
|
| 351 |
+
autotuning_gemm = config.max_autotune or config.max_autotune_gemm
|
| 352 |
+
|
| 353 |
+
if (
|
| 354 |
+
(config.conv_1x1_as_mm or (autotuning_gemm and channels_last_conv()))
|
| 355 |
+
and is_ones(kernel_shape)
|
| 356 |
+
and is_ones(stride)
|
| 357 |
+
and is_zeros(padding)
|
| 358 |
+
and is_ones(dilation)
|
| 359 |
+
and not transposed
|
| 360 |
+
and is_zeros(output_padding)
|
| 361 |
+
and groups == 1
|
| 362 |
+
):
|
| 363 |
+
return convert_1x1_conv_to_mm(x, weight, bias)
|
| 364 |
+
|
| 365 |
+
if bias is not None and ir.get_device_type(x) != "cpu":
|
| 366 |
+
# peel off the bias, cudnn is slower with it
|
| 367 |
+
result = convolution(x, weight, None, **kwargs)
|
| 368 |
+
return L[aten.add](
|
| 369 |
+
result, L[aten.view](bias, [result.get_size()[1]] + ndim * [1])
|
| 370 |
+
)
|
| 371 |
+
|
| 372 |
+
x.realize()
|
| 373 |
+
weight.realize()
|
| 374 |
+
|
| 375 |
+
# ndim can be 1 for convolution in models such as demucs
|
| 376 |
+
# TODO: check if it's beneficial to convert Conv1d to Conv2d and then
|
| 377 |
+
# apply channels last.
|
| 378 |
+
if V.graph.layout_opt and ndim == 2:
|
| 379 |
+
V.graph.num_channels_last_conv += 1
|
| 380 |
+
x = ir.ExternKernel.require_channels_last(x)
|
| 381 |
+
# TODO maybe we can convert weights to channels last just once before
|
| 382 |
+
# running the model.
|
| 383 |
+
weight = ir.ExternKernel.require_channels_last(weight)
|
| 384 |
+
layout = conv_layout(x, weight, None, **kwargs)
|
| 385 |
+
else:
|
| 386 |
+
layout = conv_layout(x, weight, None, **kwargs)
|
| 387 |
+
req_stride_order = ir.get_stride_order(
|
| 388 |
+
V.graph.sizevars.size_hints(layout.stride)
|
| 389 |
+
)
|
| 390 |
+
x = ir.ExternKernel.require_stride_order(x, req_stride_order)
|
| 391 |
+
weight = ir.ExternKernel.require_stride_order(weight, req_stride_order)
|
| 392 |
+
|
| 393 |
+
ordered_kwargs_for_cpp_kernel = [
|
| 394 |
+
"stride",
|
| 395 |
+
"padding",
|
| 396 |
+
"dilation",
|
| 397 |
+
"transposed",
|
| 398 |
+
"output_padding",
|
| 399 |
+
"groups",
|
| 400 |
+
]
|
| 401 |
+
if bias is None:
|
| 402 |
+
args = [x, weight]
|
| 403 |
+
kwargs["bias"] = None # type: ignore[typeddict-unknown-key]
|
| 404 |
+
ordered_kwargs_for_cpp_kernel.insert(0, "bias")
|
| 405 |
+
else:
|
| 406 |
+
args = [x, weight, bias]
|
| 407 |
+
bias.realize()
|
| 408 |
+
bias.freeze_layout()
|
| 409 |
+
V.graph.sizevars.evaluate_static_shapes(bias.get_size())
|
| 410 |
+
|
| 411 |
+
choices = [
|
| 412 |
+
aten_convolution.bind(args, layout, ordered_kwargs_for_cpp_kernel, **kwargs)
|
| 413 |
+
]
|
| 414 |
+
if (
|
| 415 |
+
use_triton_template(layout)
|
| 416 |
+
# templates only support these:
|
| 417 |
+
and ndim == 2
|
| 418 |
+
and is_ones(dilation)
|
| 419 |
+
and not transposed
|
| 420 |
+
and is_zeros(output_padding)
|
| 421 |
+
# there are some odd models where this check fails (e.g. shufflenet_v2_x1_0)
|
| 422 |
+
and V.graph.sizevars.statically_known_equals(in_chan, x.get_size()[1])
|
| 423 |
+
):
|
| 424 |
+
if (
|
| 425 |
+
is_ones(kernel_shape)
|
| 426 |
+
and is_ones(stride)
|
| 427 |
+
and is_zeros(padding)
|
| 428 |
+
and groups == 1
|
| 429 |
+
):
|
| 430 |
+
choices.append(aten_conv1x1_via_mm.bind(args, layout))
|
| 431 |
+
|
| 432 |
+
for cfg in conv_configs(
|
| 433 |
+
sympy_product([x.get_size()[0], *x.get_size()[2:]]),
|
| 434 |
+
out_chan,
|
| 435 |
+
in_chan,
|
| 436 |
+
):
|
| 437 |
+
conv2d_template.maybe_append_choice(
|
| 438 |
+
choices,
|
| 439 |
+
input_nodes=(x, weight),
|
| 440 |
+
layout=layout,
|
| 441 |
+
KERNEL_H=kernel_shape[0],
|
| 442 |
+
KERNEL_W=kernel_shape[1],
|
| 443 |
+
STRIDE_H=stride[0],
|
| 444 |
+
STRIDE_W=stride[1],
|
| 445 |
+
PADDING_H=padding[0],
|
| 446 |
+
PADDING_W=padding[1],
|
| 447 |
+
GROUPS=groups,
|
| 448 |
+
# TODO(jansel): try unroll for bigger kernels once fixed:
|
| 449 |
+
# https://github.com/openai/triton/issues/1254
|
| 450 |
+
UNROLL=is_ones(kernel_shape),
|
| 451 |
+
ALLOW_TF32=torch.backends.cudnn.allow_tf32,
|
| 452 |
+
num_stages=cfg.num_stages,
|
| 453 |
+
num_warps=cfg.num_warps,
|
| 454 |
+
**cfg.kwargs,
|
| 455 |
+
)
|
| 456 |
+
|
| 457 |
+
return autotune_select_algorithm("convolution", choices, args, layout)
|
| 458 |
+
|
| 459 |
+
|
| 460 |
+
@register_lowering(aten._convolution)
|
| 461 |
+
def _convolution(
|
| 462 |
+
x,
|
| 463 |
+
weight,
|
| 464 |
+
bias,
|
| 465 |
+
stride,
|
| 466 |
+
padding,
|
| 467 |
+
dilation,
|
| 468 |
+
transposed,
|
| 469 |
+
output_padding,
|
| 470 |
+
groups,
|
| 471 |
+
benchmark,
|
| 472 |
+
deterministic,
|
| 473 |
+
cudnn_enabled,
|
| 474 |
+
allow_tf32,
|
| 475 |
+
):
|
| 476 |
+
return convolution(
|
| 477 |
+
x, weight, bias, stride, padding, dilation, transposed, output_padding, groups
|
| 478 |
+
)
|
| 479 |
+
|
| 480 |
+
|
| 481 |
+
def constrain_conv_to_fx_strides(fx_node, *args, **kwargs):
|
| 482 |
+
assert fx_node.target == torch.ops.aten.convolution.default
|
| 483 |
+
if V.graph.layout_opt:
|
| 484 |
+
return args, kwargs
|
| 485 |
+
else:
|
| 486 |
+
return constrain_to_fx_strides(fx_node, *args, **kwargs)
|
| 487 |
+
|
| 488 |
+
|
| 489 |
+
add_layout_constraint(aten.convolution, constrain_conv_to_fx_strides)
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/mm.py
ADDED
|
@@ -0,0 +1,305 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
from typing import Any, Dict, List
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
from torch._inductor.virtualized import V
|
| 6 |
+
from .. import config as inductor_config
|
| 7 |
+
from ..codegen.cuda.gemm_template import CUTLASSGemmTemplate
|
| 8 |
+
from ..lowering import register_lowering
|
| 9 |
+
from ..select_algorithm import (
|
| 10 |
+
autotune_select_algorithm,
|
| 11 |
+
ExternKernelChoice,
|
| 12 |
+
TritonTemplate,
|
| 13 |
+
)
|
| 14 |
+
from ..utils import (
|
| 15 |
+
use_aten_gemm_kernels,
|
| 16 |
+
use_cutlass_template,
|
| 17 |
+
use_max_autotune,
|
| 18 |
+
use_triton_template,
|
| 19 |
+
)
|
| 20 |
+
from .mm_common import (
|
| 21 |
+
addmm_epilogue,
|
| 22 |
+
int8_mm_configs,
|
| 23 |
+
mm_args,
|
| 24 |
+
mm_configs,
|
| 25 |
+
mm_grid,
|
| 26 |
+
mm_options,
|
| 27 |
+
)
|
| 28 |
+
|
| 29 |
+
log = logging.getLogger(__name__)
|
| 30 |
+
aten = torch.ops.aten
|
| 31 |
+
|
| 32 |
+
mm_template = TritonTemplate(
|
| 33 |
+
name="mm",
|
| 34 |
+
grid=mm_grid,
|
| 35 |
+
source=r"""
|
| 36 |
+
{{def_kernel("A", "B")}}
|
| 37 |
+
M = {{size("A", 0)}}
|
| 38 |
+
N = {{size("B", 1)}}
|
| 39 |
+
K = {{size("A", 1)}}
|
| 40 |
+
if M * N == 0:
|
| 41 |
+
# early exit due to zero-size input(s)
|
| 42 |
+
return
|
| 43 |
+
stride_am = {{stride("A", 0)}}
|
| 44 |
+
stride_ak = {{stride("A", 1)}}
|
| 45 |
+
stride_bk = {{stride("B", 0)}}
|
| 46 |
+
stride_bn = {{stride("B", 1)}}
|
| 47 |
+
|
| 48 |
+
# based on triton.ops.matmul
|
| 49 |
+
pid = tl.program_id(0)
|
| 50 |
+
grid_m = (M + BLOCK_M - 1) // BLOCK_M
|
| 51 |
+
grid_n = (N + BLOCK_N - 1) // BLOCK_N
|
| 52 |
+
|
| 53 |
+
# re-order program ID for better L2 performance
|
| 54 |
+
width = GROUP_M * grid_n
|
| 55 |
+
group_id = pid // width
|
| 56 |
+
group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
|
| 57 |
+
pid_m = group_id * GROUP_M + (pid % group_size)
|
| 58 |
+
pid_n = (pid % width) // (group_size)
|
| 59 |
+
|
| 60 |
+
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
|
| 61 |
+
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
|
| 62 |
+
ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
|
| 63 |
+
rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
|
| 64 |
+
rk = tl.arange(0, BLOCK_K)
|
| 65 |
+
A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)
|
| 66 |
+
B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn)
|
| 67 |
+
|
| 68 |
+
acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE)
|
| 69 |
+
for k in range(K, 0, -BLOCK_K):
|
| 70 |
+
if EVEN_K:
|
| 71 |
+
a = tl.load(A)
|
| 72 |
+
b = tl.load(B)
|
| 73 |
+
else:
|
| 74 |
+
a = tl.load(A, mask=rk[None, :] < k, other=0.)
|
| 75 |
+
b = tl.load(B, mask=rk[:, None] < k, other=0.)
|
| 76 |
+
if B_PROLOGUE_CAST_TYPE is not None:
|
| 77 |
+
b = b.to(B_PROLOGUE_CAST_TYPE)
|
| 78 |
+
acc += tl.dot(a, b, allow_tf32=ALLOW_TF32)
|
| 79 |
+
A += BLOCK_K * stride_ak
|
| 80 |
+
B += BLOCK_K * stride_bk
|
| 81 |
+
|
| 82 |
+
# rematerialize rm and rn to save registers
|
| 83 |
+
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
|
| 84 |
+
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
|
| 85 |
+
idx_m = rm[:, None]
|
| 86 |
+
idx_n = rn[None, :]
|
| 87 |
+
mask = (idx_m < M) & (idx_n < N)
|
| 88 |
+
|
| 89 |
+
# inductor generates a suffix
|
| 90 |
+
{{store_output(("idx_m", "idx_n"), "acc", "mask")}}
|
| 91 |
+
""",
|
| 92 |
+
)
|
| 93 |
+
|
| 94 |
+
aten_mm = ExternKernelChoice(torch.mm, "at::mm_out")
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
aten_addmm = ExternKernelChoice(torch.addmm, "at::addmm_out")
|
| 98 |
+
|
| 99 |
+
aten__int_mm = ExternKernelChoice(torch._int_mm, "at::_int_mm")
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
def _is_int8_mat(mat):
|
| 103 |
+
return mat.get_dtype() in (torch.int8, torch.uint8)
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
def bias_addmm(inp, mat1, mat2, *, out=None, alpha=1, beta=1):
|
| 107 |
+
"""
|
| 108 |
+
Giving torch.addmm a 1D tensor calls a different (faster) cublasLt
|
| 109 |
+
kernel under the hood. There are a few shapes where this is slower,
|
| 110 |
+
but they are rare.
|
| 111 |
+
"""
|
| 112 |
+
if inp.stride(0) == 0 or inp.size(0) == 1:
|
| 113 |
+
return torch.addmm(inp[0], mat1, mat2, out=out, alpha=alpha, beta=beta)
|
| 114 |
+
return torch.addmm(inp, mat1, mat2, out=out, alpha=alpha, beta=beta)
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
aten_bias_addmm = ExternKernelChoice(bias_addmm, None)
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
@register_lowering(aten.mm, type_promotion_kind=None)
|
| 121 |
+
def tuned_mm(mat1, mat2, *, layout=None):
|
| 122 |
+
m, n, k, layout, mat1, mat2 = mm_args(mat1, mat2, layout=layout)
|
| 123 |
+
|
| 124 |
+
# options to tune from
|
| 125 |
+
choices = [aten_mm.bind((mat1, mat2), layout)] if use_aten_gemm_kernels() else []
|
| 126 |
+
|
| 127 |
+
if m * n != 0 and use_triton_template(layout):
|
| 128 |
+
for config in mm_configs(m, n, k):
|
| 129 |
+
mm_template.maybe_append_choice(
|
| 130 |
+
choices,
|
| 131 |
+
input_nodes=(mat1, mat2),
|
| 132 |
+
layout=layout,
|
| 133 |
+
**mm_options(config, k, layout),
|
| 134 |
+
)
|
| 135 |
+
|
| 136 |
+
if m * n != 0 and use_cutlass_template(layout):
|
| 137 |
+
CUTLASSGemmTemplate.add_cutlass_gemm_choices(
|
| 138 |
+
choices, layout, [mat1, mat2], fuseable=True, non_fuseable=True
|
| 139 |
+
)
|
| 140 |
+
|
| 141 |
+
from torch._inductor.ir import FixedLayout, FlexibleLayout
|
| 142 |
+
|
| 143 |
+
if (
|
| 144 |
+
len(choices) == 1
|
| 145 |
+
and use_aten_gemm_kernels()
|
| 146 |
+
and isinstance(layout, FixedLayout)
|
| 147 |
+
):
|
| 148 |
+
# If we are not autotuning, we can swap to a FlexibleLayout
|
| 149 |
+
# in order to get fusion optimizations to kick in, e.g. ConcatFusion
|
| 150 |
+
layout = FlexibleLayout(
|
| 151 |
+
device=layout.device, dtype=layout.dtype, size=layout.size
|
| 152 |
+
)
|
| 153 |
+
choices = [aten_mm.bind((mat1, mat2), layout)]
|
| 154 |
+
|
| 155 |
+
return autotune_select_algorithm("mm", choices, [mat1, mat2], layout)
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
@register_lowering(aten._int_mm, type_promotion_kind=None)
|
| 159 |
+
def tuned_int_mm(mat1, mat2, *, layout=None):
|
| 160 |
+
m, n, k, layout, mat1, mat2 = mm_args(
|
| 161 |
+
mat1, mat2, layout=layout, out_dtype=torch.int32
|
| 162 |
+
)
|
| 163 |
+
choices = (
|
| 164 |
+
[aten__int_mm.bind((mat1, mat2), layout)] if use_aten_gemm_kernels() else []
|
| 165 |
+
)
|
| 166 |
+
if m * n != 0 and use_triton_template(layout, enable_int32=True):
|
| 167 |
+
# TODO: Re-enable eager mode implementation once cuBLAS is fixed
|
| 168 |
+
choices = []
|
| 169 |
+
for config in int8_mm_configs(m, n, k):
|
| 170 |
+
mm_template.maybe_append_choice(
|
| 171 |
+
choices,
|
| 172 |
+
input_nodes=(mat1, mat2),
|
| 173 |
+
layout=layout,
|
| 174 |
+
**mm_options(config, k, layout),
|
| 175 |
+
)
|
| 176 |
+
return autotune_select_algorithm("int_mm", choices, [mat1, mat2], layout)
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
@register_lowering(aten.addmm, type_promotion_kind=None)
|
| 180 |
+
def tuned_addmm(inp, mat1, mat2, *, alpha=1, beta=1, layout=None):
|
| 181 |
+
ordered_kwargs_for_cpp_kernel = ("beta", "alpha")
|
| 182 |
+
|
| 183 |
+
m, n, k, layout, mat1, mat2, inp_expanded = mm_args(mat1, mat2, inp, layout=layout)
|
| 184 |
+
if m * n == 0 or not use_max_autotune():
|
| 185 |
+
choices = (
|
| 186 |
+
[
|
| 187 |
+
aten_addmm.bind(
|
| 188 |
+
(inp, mat1, mat2),
|
| 189 |
+
layout,
|
| 190 |
+
ordered_kwargs_for_cpp_kernel,
|
| 191 |
+
alpha=alpha,
|
| 192 |
+
beta=beta,
|
| 193 |
+
)
|
| 194 |
+
]
|
| 195 |
+
if use_aten_gemm_kernels()
|
| 196 |
+
else []
|
| 197 |
+
)
|
| 198 |
+
return autotune_select_algorithm("addmm", choices, [inp, mat1, mat2], layout)
|
| 199 |
+
|
| 200 |
+
choices = (
|
| 201 |
+
[
|
| 202 |
+
aten_addmm.bind(
|
| 203 |
+
(inp_expanded, mat1, mat2),
|
| 204 |
+
layout,
|
| 205 |
+
ordered_kwargs_for_cpp_kernel,
|
| 206 |
+
alpha=alpha,
|
| 207 |
+
beta=beta,
|
| 208 |
+
)
|
| 209 |
+
]
|
| 210 |
+
if use_aten_gemm_kernels()
|
| 211 |
+
else []
|
| 212 |
+
)
|
| 213 |
+
|
| 214 |
+
if (
|
| 215 |
+
use_aten_gemm_kernels()
|
| 216 |
+
and inp_expanded.get_stride()[0] == 0
|
| 217 |
+
and inp_expanded.get_device().type == "cuda"
|
| 218 |
+
and inductor_config.triton.autotune_cublasLt
|
| 219 |
+
):
|
| 220 |
+
# unexpand inp to make sure fused addmm from cublasLt is used
|
| 221 |
+
choices.insert(
|
| 222 |
+
0,
|
| 223 |
+
aten_bias_addmm.bind(
|
| 224 |
+
(inp_expanded, mat1, mat2), layout, alpha=alpha, beta=beta
|
| 225 |
+
),
|
| 226 |
+
)
|
| 227 |
+
|
| 228 |
+
if use_triton_template(layout):
|
| 229 |
+
for config in mm_configs(m, n, k):
|
| 230 |
+
mm_template.maybe_append_choice(
|
| 231 |
+
choices,
|
| 232 |
+
input_nodes=(inp_expanded, mat1, mat2),
|
| 233 |
+
layout=layout,
|
| 234 |
+
**mm_options(config, k, layout),
|
| 235 |
+
prefix_args=1,
|
| 236 |
+
epilogue_fn=addmm_epilogue(layout.dtype, alpha, beta),
|
| 237 |
+
)
|
| 238 |
+
|
| 239 |
+
if use_cutlass_template(layout):
|
| 240 |
+
CUTLASSGemmTemplate.add_cutlass_gemm_choices(
|
| 241 |
+
choices,
|
| 242 |
+
layout,
|
| 243 |
+
[mat1, mat2, inp_expanded],
|
| 244 |
+
alpha=alpha,
|
| 245 |
+
beta=beta,
|
| 246 |
+
input_reorder=[2, 0, 1],
|
| 247 |
+
fuseable=False,
|
| 248 |
+
)
|
| 249 |
+
|
| 250 |
+
return autotune_select_algorithm(
|
| 251 |
+
"addmm", choices, [inp_expanded, mat1, mat2], layout
|
| 252 |
+
)
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
def fallback_mixed_mm(mat1, mat2, *, out):
|
| 256 |
+
return torch.mm(mat1, mat2.to(mat1.dtype), out=out)
|
| 257 |
+
|
| 258 |
+
|
| 259 |
+
aten_fallback_mixed_mm = ExternKernelChoice(fallback_mixed_mm, None)
|
| 260 |
+
|
| 261 |
+
|
| 262 |
+
def tuned_mixed_mm(mat1, mat2, mat2_dtype):
|
| 263 |
+
m, n, k, layout, mat1, mat2 = mm_args(mat1, mat2, layout=None)
|
| 264 |
+
choices = [aten_fallback_mixed_mm.bind((mat1, mat2), layout)]
|
| 265 |
+
if mat1.layout.dtype != torch.float32 and not mat2.layout.is_contiguous():
|
| 266 |
+
# can't use triton kernel unless one of these is true
|
| 267 |
+
return autotune_select_algorithm("mixed_mm", choices, [mat1, mat2], layout)
|
| 268 |
+
if inductor_config.force_mixed_mm:
|
| 269 |
+
choices = []
|
| 270 |
+
b_prologue_cast_type = f"tl.{mat2_dtype}".replace("torch.", "")
|
| 271 |
+
has_int8_tensor = _is_int8_mat(mat1) or _is_int8_mat(mat2)
|
| 272 |
+
for config in mm_configs(m, n, k, has_int8_tensor=has_int8_tensor):
|
| 273 |
+
mm_template.maybe_append_choice(
|
| 274 |
+
choices,
|
| 275 |
+
input_nodes=(mat1, mat2),
|
| 276 |
+
layout=layout,
|
| 277 |
+
**mm_options(config, k, layout, b_prologue_cast_type),
|
| 278 |
+
)
|
| 279 |
+
return autotune_select_algorithm("mixed_mm", choices, [mat1, mat2], layout)
|
| 280 |
+
|
| 281 |
+
|
| 282 |
+
# This op is a special case of the int_mm op which we use based on the pattern
|
| 283 |
+
# _int_mm -> mul (defined in ../fx_passes/post_grad.py) in order to prevent
|
| 284 |
+
# realization of the int32 _int_mm output by forcing fusion with the mul op.
|
| 285 |
+
# This is only used when config.force_fuse_int_mm_with_mul = True
|
| 286 |
+
def tuned_fused_int_mm_mul(mat1, mat2, mat3, out_dtype, *, layout=None):
|
| 287 |
+
out_dtype = (
|
| 288 |
+
torch.promote_types(mat3.get_dtype(), torch.int32)
|
| 289 |
+
if out_dtype is None
|
| 290 |
+
else out_dtype
|
| 291 |
+
)
|
| 292 |
+
m, n, k, layout, mat1, mat2, mat3 = mm_args(
|
| 293 |
+
mat1, mat2, mat3, layout=layout, out_dtype=out_dtype
|
| 294 |
+
)
|
| 295 |
+
choices: List[Dict[Any, Any]] = []
|
| 296 |
+
for config in int8_mm_configs(m, n, k):
|
| 297 |
+
mm_template.maybe_append_choice(
|
| 298 |
+
choices,
|
| 299 |
+
input_nodes=(mat1, mat2, mat3),
|
| 300 |
+
layout=layout,
|
| 301 |
+
**dict(mm_options(config, k, layout), ACC_TYPE="tl.int32"),
|
| 302 |
+
suffix_args=1,
|
| 303 |
+
epilogue_fn=V.ops.mul,
|
| 304 |
+
)
|
| 305 |
+
return autotune_select_algorithm("int_mm", choices, [mat1, mat2, mat3], layout)
|
evalkit_tf437/lib/python3.10/site-packages/torch/_inductor/kernel/mm_common.py
ADDED
|
@@ -0,0 +1,222 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
import logging
|
| 3 |
+
from typing import cast, List, Tuple
|
| 4 |
+
|
| 5 |
+
import sympy
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
from torch._inductor.select_algorithm import realize_inputs
|
| 9 |
+
from torch._inductor.virtualized import V
|
| 10 |
+
|
| 11 |
+
from ..utils import ceildiv as cdiv, next_power_of_2
|
| 12 |
+
|
| 13 |
+
log = logging.getLogger(__name__)
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def triton_config(num_stages, num_warps, **kwargs):
|
| 17 |
+
from triton import Config
|
| 18 |
+
|
| 19 |
+
return Config(kwargs, num_stages=num_stages, num_warps=num_warps)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def filtered_configs(
|
| 23 |
+
m: int,
|
| 24 |
+
n: int,
|
| 25 |
+
k: int,
|
| 26 |
+
configs: List[Tuple[int, int, int, int, int]],
|
| 27 |
+
has_int8_tensor=False,
|
| 28 |
+
):
|
| 29 |
+
"""Heuristic to shrink configs when they are bigger than the input size"""
|
| 30 |
+
|
| 31 |
+
# According to https://github.com/openai/triton/issues/2156#issuecomment-1695897424
|
| 32 |
+
# it's safer to use at least [32, 32] block size for int8/uint8
|
| 33 |
+
# tensors
|
| 34 |
+
min_block_size = 32 if has_int8_tensor else 16
|
| 35 |
+
m = max(
|
| 36 |
+
next_power_of_2(
|
| 37 |
+
V.graph.sizevars.size_hint(
|
| 38 |
+
m, fallback=torch._inductor.config.unbacked_symint_fallback
|
| 39 |
+
)
|
| 40 |
+
),
|
| 41 |
+
min_block_size,
|
| 42 |
+
)
|
| 43 |
+
n = max(
|
| 44 |
+
next_power_of_2(
|
| 45 |
+
V.graph.sizevars.size_hint(
|
| 46 |
+
n, fallback=torch._inductor.config.unbacked_symint_fallback
|
| 47 |
+
)
|
| 48 |
+
),
|
| 49 |
+
min_block_size,
|
| 50 |
+
)
|
| 51 |
+
k = max(
|
| 52 |
+
next_power_of_2(
|
| 53 |
+
V.graph.sizevars.size_hint(
|
| 54 |
+
k, fallback=torch._inductor.config.unbacked_symint_fallback
|
| 55 |
+
)
|
| 56 |
+
),
|
| 57 |
+
min_block_size,
|
| 58 |
+
)
|
| 59 |
+
used = set()
|
| 60 |
+
for block_m, block_n, block_k, num_stages, num_warps in configs:
|
| 61 |
+
# shrink configs for small sizes
|
| 62 |
+
block_m = max(min(block_m, m), min_block_size)
|
| 63 |
+
block_n = max(min(block_n, n), min_block_size)
|
| 64 |
+
block_k = max(min(block_k, k), min_block_size)
|
| 65 |
+
# each warp computes 16x16 tile = 256
|
| 66 |
+
num_warps = min(num_warps, block_m * block_n // 256)
|
| 67 |
+
if (block_m, block_n, block_k, num_stages, num_warps) not in used:
|
| 68 |
+
used.add((block_m, block_n, block_k, num_stages, num_warps))
|
| 69 |
+
yield triton_config(
|
| 70 |
+
BLOCK_M=block_m,
|
| 71 |
+
BLOCK_N=block_n,
|
| 72 |
+
BLOCK_K=block_k,
|
| 73 |
+
num_stages=num_stages,
|
| 74 |
+
num_warps=num_warps,
|
| 75 |
+
)
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
# List of dictionaries to store the kernel configs. Configs that evaluate to true
|
| 79 |
+
# will be utilised on the target platform
|
| 80 |
+
mm_kernel_configs = [
|
| 81 |
+
# "BLOCK_M", "BLOCK_N", "BLOCK_K", "num_stages", "num_warps"
|
| 82 |
+
{"config": (64, 64, 32, 2, 4), "cond": True},
|
| 83 |
+
{"config": (64, 128, 32, 3, 4), "cond": True},
|
| 84 |
+
{"config": (128, 64, 32, 3, 4), "cond": True},
|
| 85 |
+
{"config": (64, 128, 32, 4, 8), "cond": True},
|
| 86 |
+
{"config": (128, 64, 32, 4, 8), "cond": True},
|
| 87 |
+
{"config": (64, 32, 32, 5, 8), "cond": True},
|
| 88 |
+
{"config": (32, 64, 32, 5, 8), "cond": True},
|
| 89 |
+
{"config": (128, 128, 32, 2, 8), "cond": True},
|
| 90 |
+
{"config": (64, 64, 64, 3, 8), "cond": True},
|
| 91 |
+
{"config": (32, 32, 128, 2, 4), "cond": torch.version.hip is None},
|
| 92 |
+
{"config": (64, 64, 16, 2, 4), "cond": True},
|
| 93 |
+
{"config": (32, 32, 16, 1, 2), "cond": True},
|
| 94 |
+
]
|
| 95 |
+
|
| 96 |
+
int8_mm_kernel_configs = [
|
| 97 |
+
{"config": (64, 64, 32, 2, 4), "cond": True},
|
| 98 |
+
{"config": (64, 128, 32, 3, 4), "cond": True},
|
| 99 |
+
{"config": (128, 64, 32, 3, 4), "cond": True},
|
| 100 |
+
{"config": (64, 128, 32, 4, 8), "cond": True},
|
| 101 |
+
{"config": (128, 64, 32, 4, 8), "cond": True},
|
| 102 |
+
{"config": (64, 32, 32, 5, 8), "cond": True},
|
| 103 |
+
{"config": (32, 64, 32, 5, 8), "cond": True},
|
| 104 |
+
{"config": (128, 128, 32, 2, 8), "cond": True},
|
| 105 |
+
{"config": (64, 64, 64, 3, 8), "cond": True},
|
| 106 |
+
# {"config": (32, 32, 128, 2, 4), "cond": True},
|
| 107 |
+
# {"config": (64, 64, 16, 2, 4), "cond": True},
|
| 108 |
+
# {"config": (32, 32, 16, 1, 2), "cond": True},
|
| 109 |
+
{"config": (128, 256, 128, 3, 8), "cond": torch.version.hip is None},
|
| 110 |
+
{"config": (256, 128, 128, 3, 8), "cond": torch.version.hip is None},
|
| 111 |
+
]
|
| 112 |
+
|
| 113 |
+
# Create filtered list of configs based on cond evaluation
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
mm_platform_configs = tuple(
|
| 117 |
+
cast(Tuple[int, int, int, int, int], config["config"])
|
| 118 |
+
for config in mm_kernel_configs
|
| 119 |
+
if config["cond"]
|
| 120 |
+
)
|
| 121 |
+
int8_platform_configs = tuple(
|
| 122 |
+
cast(Tuple[int, int, int, int, int], config["config"])
|
| 123 |
+
for config in int8_mm_kernel_configs
|
| 124 |
+
if config["cond"]
|
| 125 |
+
)
|
| 126 |
+
|
| 127 |
+
# On ROCm convert num_stages to 1 as pipelining provides no benefit
|
| 128 |
+
if torch.version.hip:
|
| 129 |
+
mm_platform_configs = tuple(
|
| 130 |
+
(config[0], config[1], config[2], 1, config[4])
|
| 131 |
+
for config in mm_platform_configs
|
| 132 |
+
)
|
| 133 |
+
int8_platform_configs = tuple(
|
| 134 |
+
(config[0], config[1], config[2], 1, config[4])
|
| 135 |
+
for config in mm_platform_configs
|
| 136 |
+
)
|
| 137 |
+
|
| 138 |
+
mm_configs = functools.partial(
|
| 139 |
+
filtered_configs,
|
| 140 |
+
configs=mm_platform_configs,
|
| 141 |
+
)
|
| 142 |
+
|
| 143 |
+
int8_mm_configs = functools.partial(
|
| 144 |
+
filtered_configs,
|
| 145 |
+
configs=int8_platform_configs,
|
| 146 |
+
)
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
def mm_grid(m, n, meta):
|
| 150 |
+
"""
|
| 151 |
+
The CUDA grid size for matmul triton templates.
|
| 152 |
+
"""
|
| 153 |
+
return (cdiv(m, meta["BLOCK_M"]) * cdiv(n, meta["BLOCK_N"]), 1, 1)
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
def acc_type(dtype):
|
| 157 |
+
if dtype in (torch.float16, torch.bfloat16):
|
| 158 |
+
return "tl.float32"
|
| 159 |
+
return f"tl.{dtype}".replace("torch.", "")
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
def mm_options(config, sym_k, layout, b_prologue_cast_type=None):
|
| 163 |
+
"""
|
| 164 |
+
Common options to matmul triton templates.
|
| 165 |
+
"""
|
| 166 |
+
even_k_symbolic = (
|
| 167 |
+
# it isn't worth guarding on this
|
| 168 |
+
sympy.gcd(sym_k, config.kwargs["BLOCK_K"])
|
| 169 |
+
== config.kwargs["BLOCK_K"]
|
| 170 |
+
)
|
| 171 |
+
return dict(
|
| 172 |
+
GROUP_M=8,
|
| 173 |
+
EVEN_K=even_k_symbolic,
|
| 174 |
+
ALLOW_TF32=torch.backends.cuda.matmul.allow_tf32,
|
| 175 |
+
ACC_TYPE=acc_type(layout.dtype),
|
| 176 |
+
B_PROLOGUE_CAST_TYPE=b_prologue_cast_type,
|
| 177 |
+
num_stages=config.num_stages,
|
| 178 |
+
num_warps=config.num_warps,
|
| 179 |
+
**config.kwargs,
|
| 180 |
+
)
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
def mm_args(mat1, mat2, *others, layout=None, out_dtype=None, use_4x2_dim=False):
|
| 184 |
+
"""
|
| 185 |
+
Common arg processing for mm,bmm,addmm,etc
|
| 186 |
+
"""
|
| 187 |
+
mat1, mat2 = realize_inputs(mat1, mat2)
|
| 188 |
+
*b1, m, k1 = mat1.get_size()
|
| 189 |
+
*b2, k2, n = mat2.get_size()
|
| 190 |
+
b = [V.graph.sizevars.guard_equals(a, b) for a, b in zip(b1, b2)]
|
| 191 |
+
if use_4x2_dim:
|
| 192 |
+
k2 = k2 * 2
|
| 193 |
+
k = V.graph.sizevars.guard_equals(k1, k2)
|
| 194 |
+
if layout is None:
|
| 195 |
+
from torch._inductor.ir import FixedLayout
|
| 196 |
+
|
| 197 |
+
if out_dtype is None:
|
| 198 |
+
out_dtype = mat1.get_dtype()
|
| 199 |
+
layout = FixedLayout(
|
| 200 |
+
mat1.get_device(),
|
| 201 |
+
out_dtype,
|
| 202 |
+
[*b, m, n],
|
| 203 |
+
)
|
| 204 |
+
else:
|
| 205 |
+
assert out_dtype is None, "out_dtype is ignored if layout is specified."
|
| 206 |
+
|
| 207 |
+
from ..lowering import expand
|
| 208 |
+
|
| 209 |
+
others = [realize_inputs(expand(x, layout.size)) for x in others]
|
| 210 |
+
|
| 211 |
+
return [m, n, k, layout, mat1, mat2, *others]
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
def addmm_epilogue(dtype, alpha, beta):
|
| 215 |
+
def epilogue(acc, bias):
|
| 216 |
+
if alpha != 1:
|
| 217 |
+
acc = V.ops.mul(acc, V.ops.constant(alpha, dtype))
|
| 218 |
+
if beta != 1:
|
| 219 |
+
bias = V.ops.mul(bias, V.ops.constant(beta, dtype))
|
| 220 |
+
return V.ops.add(acc, bias)
|
| 221 |
+
|
| 222 |
+
return epilogue
|
evalkit_tf437/lib/python3.10/site-packages/torch/_lazy/__pycache__/tensor_factory_functions.cpython-310.pyc
ADDED
|
Binary file (723 Bytes). View file
|
|
|