repo
stringlengths
7
90
file_url
stringlengths
81
315
file_path
stringlengths
4
228
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 14:38:15
2026-01-05 02:33:18
truncated
bool
2 classes
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/quantization/test_nvfp4_quant.py
tests/kernels/quantization/test_nvfp4_quant.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import pytest import torch from vllm import _custom_ops as ops from vllm.platforms import current_platform from vllm.scalar_type import scalar_types if not current_platform.has_device_capability(100): pytest.skip( reason="Nvfp4 Requires compute capability of 10 or above.", allow_module_level=True, ) DTYPES = [torch.float16, torch.bfloat16] SHAPES = [(128, 64), (128, 128), (256, 64), (256, 128)] PAD_SHAPES = [ (90, 64), (150, 64), (128, 48), (128, 80), (150, 80), (90, 48), (90, 128), (150, 128), (150, 48), (90, 80), ] SEEDS = [42] CUDA_DEVICES = ["cuda:0"] FLOAT4_E2M1_MAX = scalar_types.float4_e2m1f.max() FLOAT8_E4M3_MAX = torch.finfo(torch.float8_e4m3fn).max # E2M1 to float # 0111 -> 6 # 0110 -> 4 # 0101 -> 3 # 0100 -> 2 # 0011 -> 1.5 # 0010 -> 1 # 0001 -> 0.5 # 0000 -> 0 E2M1_TO_FLOAT32 = [ 0.0, 0.5, 1.0, 1.5, 2.0, 3.0, 4.0, 6.0, 0.0, -0.5, -1.0, -1.5, -2.0, -3.0, -4.0, -6.0, ] BLOCK_SIZE = 16 def cast_from_fp4(x, m, n): # The fp4 values are packed in uint8 as [v_1st | v_2nd] v_2nd = x & 0xF v_1st = (x >> 4) & 0xF c = torch.stack((v_2nd, v_1st), dim=-1) out = torch.tensor([E2M1_TO_FLOAT32[x] for x in c.flatten()]) out = out.reshape(m, n).to(torch.float32) return out def cast_to_fp4(x): sign = torch.sign(x) x = torch.abs(x) x[(x >= 0.0) & (x <= 0.25)] = 0.0 x[(x > 0.25) & (x < 0.75)] = 0.5 x[(x >= 0.75) & (x <= 1.25)] = 1.0 x[(x > 1.25) & (x < 1.75)] = 1.5 x[(x >= 1.75) & (x <= 2.5)] = 2.0 x[(x > 2.5) & (x < 3.5)] = 3.0 x[(x >= 3.5) & (x <= 5.0)] = 4.0 x[x > 5.0] = 6.0 return x * sign def get_reciprocal(x): if isinstance(x, torch.Tensor): return torch.where(x == 0, torch.tensor(0.0, dtype=x.dtype), 1.0 / x) elif isinstance(x, (float, int)): return 0.0 if x == 0 else 1.0 / x else: raise TypeError("Input must be a float, int, or a torch.Tensor.") def ref_nvfp4_quant(x, global_scale): assert global_scale.dtype == torch.float32 assert x.ndim == 2 m, n = x.shape x 
= torch.reshape(x, (m, n // BLOCK_SIZE, BLOCK_SIZE)) vec_max = torch.max(torch.abs(x), dim=-1, keepdim=True)[0].to(torch.float32) scale = global_scale * (vec_max * get_reciprocal(FLOAT4_E2M1_MAX)) scale = scale.to(torch.float8_e4m3fn).to(torch.float32) output_scale = get_reciprocal(scale * get_reciprocal(global_scale)) scaled_x = x.to(torch.float32) * output_scale clipped_x = torch.clamp(scaled_x, -6.0, 6.0).reshape(m, n) return cast_to_fp4(clipped_x), scale.squeeze(-1) def recover_swizzled_scales(scale, m, n): round_up = lambda x, y: (x + y - 1) // y * y rounded_m = round_up(m, 128) scale_n = n // BLOCK_SIZE rounded_n = round_up(scale_n, 4) # Recover the swizzled scaling factor to linear layout tmp = torch.reshape(scale, (1, rounded_m // 128, rounded_n // 4, 32, 4, 4)) tmp = torch.permute(tmp, (0, 1, 4, 3, 2, 5)) result = torch.reshape(tmp, (rounded_m, rounded_n)).to(torch.float32) return result[:m, :scale_n] @pytest.mark.parametrize("dtype", DTYPES) @pytest.mark.parametrize("shape", SHAPES) @pytest.mark.parametrize("seed", SEEDS) @pytest.mark.parametrize("device", CUDA_DEVICES) @torch.inference_mode() def test_quantize_to_fp4( dtype: torch.dtype, shape: tuple[int, int], seed: int, device: str, ) -> None: current_platform.seed_everything(seed) torch.set_default_device(device) m, n = shape x = torch.randn((m, n), dtype=dtype) tensor_amax = torch.abs(x).max().to(torch.float32) global_scale = FLOAT8_E4M3_MAX * FLOAT4_E2M1_MAX / tensor_amax out_ref, scale_ref = ref_nvfp4_quant(x, global_scale) out, out_scale = ops.scaled_fp4_quant(x, global_scale) scale_ans = recover_swizzled_scales(out_scale, m, n) out_ans = cast_from_fp4(out, m, n) torch.testing.assert_close(out_ans, out_ref) torch.testing.assert_close(scale_ans, scale_ref) @pytest.mark.parametrize("pad_shape", PAD_SHAPES) @torch.inference_mode() def test_quantize_to_fp4_padded(pad_shape: tuple[int, int]) -> None: dtype = torch.float16 current_platform.seed_everything(42) torch.set_default_device("cuda:0") m, n = 
pad_shape x = torch.randn((m, n), dtype=dtype) tensor_amax = torch.abs(x).max().to(torch.float32) global_scale = FLOAT8_E4M3_MAX * FLOAT4_E2M1_MAX / tensor_amax out_ref, scale_ref = ref_nvfp4_quant(x, global_scale) out, out_scale = ops.scaled_fp4_quant(x, global_scale) scale_ans = recover_swizzled_scales(out_scale, m, n) out_ans = cast_from_fp4(out, m, n) torch.testing.assert_close(out_ans, out_ref) torch.testing.assert_close(scale_ans, scale_ref)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/quantization/test_per_token_group_quant.py
tests/kernels/quantization/test_per_token_group_quant.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from unittest.mock import patch import pytest import torch from vllm.model_executor.layers.quantization.utils import fp8_utils, int8_utils @pytest.mark.parametrize("shape", [(32, 128), (64, 256), (16, 512)]) @pytest.mark.parametrize("column_major", [False, True]) @pytest.mark.parametrize("scale_ue8m0", [False, True]) @pytest.mark.parametrize("group_size", [64, 128]) @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") def test_per_token_group_quant_fp8( shape, column_major: bool, scale_ue8m0: bool, group_size: int ): device = "cuda" torch.manual_seed(42) num_tokens, hidden_dim = shape x = torch.randn((num_tokens, hidden_dim), device=device, dtype=torch.bfloat16) * 8 # cuda path out_q, scale = fp8_utils.per_token_group_quant_fp8( x, group_size, column_major_scales=column_major, use_ue8m0=scale_ue8m0, ) # triton ref with patch("vllm.platforms.current_platform.is_cuda", return_value=False): ref_q, ref_s = fp8_utils.per_token_group_quant_fp8( x, group_size, column_major_scales=column_major, use_ue8m0=scale_ue8m0, ) assert torch.allclose(out_q.float(), ref_q.float(), atol=0.15, rtol=0.15) assert torch.allclose(scale, ref_s, atol=0.01, rtol=0.01) @pytest.mark.parametrize("shape", [(32, 128), (64, 256), (16, 512)]) @pytest.mark.parametrize("group_size", [64, 128]) @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") def test_per_token_group_quant_int8(shape, group_size: int): device = "cuda" torch.manual_seed(42) num_tokens, hidden_dim = shape x = torch.randn((num_tokens, hidden_dim), device=device, dtype=torch.bfloat16) * 8 # cuda path out_q, scale = int8_utils.per_token_group_quant_int8( x, group_size, ) # triton ref with patch("vllm.platforms.current_platform.is_cuda", return_value=False): ref_q, ref_s = int8_utils.per_token_group_quant_int8( x, group_size, ) assert torch.allclose(out_q.float(), ref_q.float(), 
atol=0.15, rtol=0.15) assert torch.allclose(scale, ref_s, atol=0.01, rtol=0.01)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/quantization/test_cutlass_2of4_sparse.py
tests/kernels/quantization/test_cutlass_2of4_sparse.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """Tests for sparse cutlass kernels Run `pytest tests/kernels/quantization/test_cutlass_2of4_sparse.py`. """ import pytest import torch from tests.kernels.utils import baseline_scaled_mm, to_fp8, to_int8 from vllm import _custom_ops as ops from vllm.model_executor.layers.quantization.utils.w8a8_utils import ( sparse_cutlass_supported, ) from vllm.platforms import current_platform CUDA_DEVICES = [f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2)] capability = current_platform.get_device_capability() capability = capability[0] * 10 + capability[1] def to_bf16(tensor: torch.Tensor) -> torch.Tensor: return tensor.to(dtype=torch.bfloat16) def to_fp16(tensor: torch.Tensor) -> torch.Tensor: return tensor.to(dtype=torch.float16) def prune_to_2_4(tensor): # Reshape tensor to [N, 4] where N is number of groups of 4 original_shape = tensor.shape reshaped = tensor.reshape(-1, 4) # Get indices of top 2 absolute values in each group of 4 _, indices = torch.topk(torch.abs(reshaped), k=2, dim=1) # Create binary mask mask = torch.zeros_like(reshaped) mask.scatter_(dim=1, index=indices, src=torch.ones_like(indices, dtype=mask.dtype)) # Apply mask and reshape back pruned = reshaped * mask # Turn all -0.0 to 0.0 pruned[pruned == -0.0] = 0.0 return pruned.reshape(original_shape) # This function checks that applying an identity matrix multiplication # to the compressed weights yields the original uncompressed weights. def check_compress_decompress_invariance( dtype: torch.dtype, b: torch.Tensor, b_compressed: torch.Tensor, b_metadata: torch.Tensor, ): # For float16 and bfloat16, cutlass_scaled_sparse_mm's output must be the # same dtype as its inputs. This line addresses that constraint while # arbitrarily using bfloat16 for the int8/fp8 cases. 
out_dtype = torch.float16 if dtype is torch.float16 else torch.bfloat16 eye = torch.eye(b.shape[0], device="cuda", dtype=dtype) eye_scale = torch.ones(1, device="cuda", dtype=torch.float32) b_decomp = ops.cutlass_scaled_sparse_mm( eye, b_compressed, b_metadata, eye_scale, eye_scale, out_dtype=out_dtype ) torch.testing.assert_close(b.to(dtype=out_dtype), b_decomp) def make_rand_sparse_tensors( dtype: torch.dtype, m: int, n: int, k: int ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: a = torch.randn((m, k), device="cuda") b = torch.randn((n, k), device="cuda").t() if dtype == torch.int8: # ensure A and B aren't all zeros after rounding a = a * 5.0 b = b * 5.0 b = prune_to_2_4(b.t()).t() if dtype == torch.int8: a, b = to_int8(a), to_int8(b) elif dtype == torch.float8_e4m3fn: a, b = to_fp8(a), to_fp8(b) elif dtype == torch.float16: a, b = to_fp16(a), to_fp16(b) elif dtype == torch.bfloat16: a, b = to_bf16(a), to_bf16(b) else: raise ValueError("unsupported dtype") b_compressed, e = ops.cutlass_sparse_compress(b.t()) check_compress_decompress_invariance(dtype, b, b_compressed, e) # Compressed B, Metadata, Original A, B return b_compressed, e, a, b @pytest.mark.skipif( not sparse_cutlass_supported(), reason="Sparse CUTLASS is not supported on this GPU type.", ) # Test working with a subset of A and B for sparse matmul def test_cutlass_sparse_subset(): big_m = 1024 m, n, k = 512, 512, 512 # Create tensors b_comp, e, whole_a, b = make_rand_sparse_tensors(torch.float8_e4m3fn, big_m, n, k) a = whole_a[0:m, 0:k] scale_a = torch.randn((1, 1), device="cuda", dtype=torch.float32) / 10 scale_b = torch.randn((1, 1), device="cuda", dtype=torch.float32) / 10 out = ops.cutlass_scaled_sparse_mm( a, b_comp, e, scale_a, scale_b, out_dtype=torch.bfloat16 ) baseline = baseline_scaled_mm(a, b, scale_a, scale_b, out_dtype=torch.bfloat16) torch.testing.assert_close(out, baseline, rtol=1e-1, atol=1e0) MNK_FACTORS = [ (1, 256, 128), (1, 16384, 1024), (1, 24576, 512), (16, 
256, 512), (16, 16384, 128), (16, 24576, 4096), (32, 8192, 4096), (32, 16384, 4096), (33, 1024, 1024), (33, 8192, 128), (64, 2048, 512), (64, 16384, 1024), (100, 8192, 512), (128, 32768, 4096), (256, 4096, 4096), (512, 256, 1024), (512, 8192, 4096), (512, 16384, 128), (512, 24576, 128), ] # Test working with a subset of A and B for sparse matmul @pytest.mark.skipif( not sparse_cutlass_supported(), reason="Sparse CUTLASS is not supported on this GPU type.", ) @pytest.mark.parametrize("m, n, k", MNK_FACTORS) @pytest.mark.parametrize("dtype", [torch.bfloat16, torch.float16]) @pytest.mark.parametrize("use_bias", [True, False]) def test_cutlass_sparse_gemm( m: int, k: int, n: int, dtype: type[torch.dtype], use_bias: bool ): # Create tensors b_comp, e, a, b = make_rand_sparse_tensors(dtype, m, n, k) scale_a = torch.ones((1, 1), device="cuda", dtype=torch.float32) scale_b = torch.ones((1, 1), device="cuda", dtype=torch.float32) bias = torch.rand((n,), device="cuda", dtype=dtype) if use_bias else None out = ops.cutlass_scaled_sparse_mm( a, b_comp, e, scale_a, scale_b, out_dtype=dtype, bias=bias ) baseline = baseline_scaled_mm(a, b, scale_a, scale_b, out_dtype=dtype, bias=bias) torch.testing.assert_close(out, baseline, rtol=1e-2, atol=3e-1) @pytest.mark.skipif( not sparse_cutlass_supported(), reason="Sparse CUTLASS is not supported on this GPU type.", ) @pytest.mark.parametrize("m, k, n", MNK_FACTORS) @pytest.mark.skipif( not current_platform.has_device_capability(89), reason="FP8 is not supported on this GPU type.", ) @pytest.mark.parametrize("use_bias", [True, False]) def test_cutlass_sparse_fp8_gemm(m: int, n: int, k: int, use_bias: bool): # Create tensors b_comp, e, a, b = make_rand_sparse_tensors(torch.float8_e4m3fn, m, n, k) scale_a = torch.randn((1, 1), device="cuda", dtype=torch.float32) scale_b = torch.randn((1, 1), device="cuda", dtype=torch.float32) out_dtype = torch.bfloat16 bias = torch.rand((n,), device="cuda", dtype=out_dtype) * 10 if use_bias else None out = 
ops.cutlass_scaled_sparse_mm( a, b_comp, e, scale_a, scale_b, out_dtype=out_dtype, bias=bias ) baseline = baseline_scaled_mm( a, b, scale_a, scale_b, out_dtype=out_dtype, bias=bias ) torch.testing.assert_close(out, baseline, rtol=1e-2, atol=3e-1) @pytest.mark.skipif( not sparse_cutlass_supported(), reason="Sparse CUTLASS is not supported on this GPU type.", ) @pytest.mark.parametrize("m,k,n", MNK_FACTORS) @pytest.mark.parametrize("per_act_token", [True, False]) @pytest.mark.parametrize("per_out_ch", [True, False]) @pytest.mark.parametrize("use_bias", [True, False]) def test_cutlass_sparse_int8_gemm( m: int, n: int, k: int, per_act_token: bool, per_out_ch: bool, use_bias: bool ): # Create tensors b_comp, e, a, b = make_rand_sparse_tensors(torch.int8, m, n, k) scale_a = torch.randn((1, 1), device="cuda", dtype=torch.float32) scale_b = torch.randn((1, 1), device="cuda", dtype=torch.float32) out_dtype = torch.bfloat16 bias = torch.rand((n,), device="cuda", dtype=out_dtype) * 10 if use_bias else None out = ops.cutlass_scaled_sparse_mm( a, b_comp, e, scale_a, scale_b, out_dtype=out_dtype, bias=bias ) baseline = baseline_scaled_mm( a, b, scale_a, scale_b, out_dtype=out_dtype, bias=bias ) torch.testing.assert_close(out, baseline, rtol=1e0, atol=2e0)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/quantization/test_gptq.py
tests/kernels/quantization/test_gptq.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import torch from tests.kernels.utils import opcheck from vllm import _custom_ops as ops # noqa: F401 def test_gptq_shuffle_opcheck(): weight = torch.randint( -2000000, 2000000, (1792, 4096), device="cuda", dtype=torch.int32 ) perm = torch.empty((0,), device="cuda", dtype=torch.int32) bit = 4 opcheck(torch.ops._C.gptq_shuffle, (weight, perm, bit)) def test_gptq_gemm_opcheck(): a = torch.rand((240, 4096), device="cuda", dtype=torch.float16) weight = torch.randint( -2000000, 2000000, (512, 6144), device="cuda", dtype=torch.int32 ) zeros = torch.zeros((32, 768), device="cuda", dtype=torch.int32) scales = torch.rand((32, 6144), device="cuda", dtype=torch.float16) idx = torch.empty((0,), device="cuda", dtype=torch.int32) use_exllama = True bit = 4 # Test both GPTQv1 and GPTQv2 format opcheck( torch.ops._C.gptq_gemm, (a, weight, zeros, scales, idx, use_exllama, True, bit) ) opcheck( torch.ops._C.gptq_gemm, (a, weight, zeros, scales, idx, use_exllama, False, bit) )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/quantization/test_nvfp4_scaled_mm.py
tests/kernels/quantization/test_nvfp4_scaled_mm.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import pytest import torch from nvfp4_utils import FLOAT4_E2M1_MAX, FLOAT8_E4M3_MAX, dequantize_nvfp4_to_dtype from vllm import _custom_ops as ops from vllm.platforms import current_platform if not current_platform.has_device_capability(100): pytest.skip( reason="Nvfp4 Requires compute capability of 10 or above.", allow_module_level=True, ) DTYPES = [torch.float16, torch.bfloat16] # m, n, k SHAPES = [(128, 128, 64), (128, 128, 128), (256, 128, 64), (128, 256, 128)] PAD_SHAPES = [(150, 128, 64), (128, 128, 96)] SHAPES.extend(PAD_SHAPES) SEEDS = [42] CUDA_DEVICES = ["cuda:0"] def get_ref_results( a_fp4, b_fp4, a_sf, b_sf, a_global_scale, b_global_scale, m, n, dtype, block_size, device, ): _, m_k = a_fp4.shape _, n_k = b_fp4.shape assert m_k == n_k a_in_dtype = dequantize_nvfp4_to_dtype( a_fp4, a_sf, a_global_scale, dtype=dtype, device=device, block_size=block_size ) b_in_dtype = dequantize_nvfp4_to_dtype( b_fp4, b_sf, b_global_scale, dtype=dtype, device=device, block_size=block_size ) return torch.matmul(a_in_dtype, b_in_dtype.t()) @pytest.mark.parametrize("dtype", DTYPES) @pytest.mark.parametrize("shape", SHAPES) @pytest.mark.parametrize("seed", SEEDS) @pytest.mark.parametrize("device", CUDA_DEVICES) @torch.inference_mode() def test_nvfp4_gemm( dtype: torch.dtype, shape: tuple[int, int, int], seed: int, device: str, ) -> None: current_platform.seed_everything(seed) m, n, packed_k = shape k = packed_k * 2 block_size = 16 a_dtype = torch.randn((m, k), dtype=dtype, device=device) b_dtype = torch.randn((n, k), dtype=dtype, device=device) a_global_scale = ( (FLOAT8_E4M3_MAX * FLOAT4_E2M1_MAX) / torch.amax(a_dtype.flatten(), dim=-1) ).to(torch.float32) b_global_scale = ( (FLOAT8_E4M3_MAX * FLOAT4_E2M1_MAX) / torch.amax(b_dtype.flatten(), dim=-1) ).to(torch.float32) alpha = 1.0 / (a_global_scale * b_global_scale) # ops.scaled_fp4_quant returns swizzled scales, while 
weights # from checkpoints are in linear scales. a_fp4, a_scale_interleaved = ops.scaled_fp4_quant(a_dtype, a_global_scale) b_fp4, b_scale_interleaved = ops.scaled_fp4_quant(b_dtype, b_global_scale) # get_ref_results unswizzles the scales internally. expected_out = get_ref_results( a_fp4, b_fp4, a_scale_interleaved, b_scale_interleaved, a_global_scale, b_global_scale, m, n, dtype, block_size, device, ) out = ops.cutlass_scaled_fp4_mm( a_fp4, b_fp4, a_scale_interleaved, b_scale_interleaved, alpha, dtype ) torch.testing.assert_close(out, expected_out.to(dtype=dtype), atol=1e-1, rtol=1e-1)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/quantization/test_awq_triton.py
tests/kernels/quantization/test_awq_triton.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """Tests for the AWQ Triton kernel. Run `pytest tests/kernels/quantization/test_awq_triton.py`. """ import pytest import torch from vllm.model_executor.layers.quantization.awq_triton import ( AWQ_TRITON_SUPPORTED_GROUP_SIZES, awq_dequantize_triton, awq_gemm_triton, ) from vllm.platforms import current_platform device = "cuda" def reverse_awq_order(t: torch.Tensor): bits = 4 AWQ_REVERSE_ORDER = [0, 4, 1, 5, 2, 6, 3, 7] reverse_order_tensor = torch.arange( t.shape[-1], dtype=torch.int32, device=t.device, ) reverse_order_tensor = reverse_order_tensor.view(-1, 32 // bits) reverse_order_tensor = reverse_order_tensor[:, AWQ_REVERSE_ORDER] reverse_order_tensor = reverse_order_tensor.view(-1) t = t[:, reverse_order_tensor] & 0xF return t # qweights - [R , C // 8], int32 # scales - [R // G, C ], float16 # zeros - [R // G, C // 8], int32 def awq_dequantize_torch( qweight: torch.Tensor, scales: torch.Tensor, qzeros: torch.Tensor, group_size: int ) -> torch.Tensor: if group_size == -1: group_size = qweight.shape[0] bits = 4 shifts = torch.arange(0, 32, bits, device=qzeros.device) iweights = torch.bitwise_right_shift(qweight[:, :, None], shifts[None, None, :]).to( torch.int8 ) iweights = iweights.view(iweights.shape[0], -1) zeros = torch.bitwise_right_shift(qzeros[:, :, None], shifts[None, None, :]).to( torch.int8 ) zeros = zeros.view(qzeros.shape[0], -1) zeros = reverse_awq_order(zeros) iweights = reverse_awq_order(iweights) iweights = torch.bitwise_and(iweights, (2**bits) - 1) zeros = torch.bitwise_and(zeros, (2**bits) - 1) scales = scales.repeat_interleave(group_size, dim=0) zeros = zeros.repeat_interleave(group_size, dim=0) return (iweights - zeros) * scales # qweights - [R , C // 8], int32 # scales - [R // G, C ], float16 # zeros - [R // G, C // 8], int32 @pytest.mark.parametrize("qweight_rows", [3584, 18944, 128, 256, 512, 1024]) 
@pytest.mark.parametrize("qweight_cols", [448, 576, 4736, 16, 32, 64, 128]) @pytest.mark.parametrize("group_size", AWQ_TRITON_SUPPORTED_GROUP_SIZES) def test_dequantize(qweight_rows, qweight_cols, group_size): if group_size == -1: group_size = qweight_rows qweight_dtype = torch.int32 scales_rows = qweight_rows // group_size scales_cols = qweight_cols * 8 scales_dtype = torch.float16 zeros_rows = scales_rows zeros_cols = qweight_cols zeros_dtype = torch.int32 current_platform.seed_everything(0) qweight = torch.randint( 0, torch.iinfo(torch.int32).max, (qweight_rows, qweight_cols), dtype=qweight_dtype, device=device, ) scales = torch.rand(scales_rows, scales_cols, dtype=scales_dtype, device=device) zeros = torch.randint( 0, torch.iinfo(torch.int32).max, (zeros_rows, zeros_cols), dtype=zeros_dtype, device=device, ) iweights_triton = awq_dequantize_triton(qweight, scales, zeros) assert not torch.any(torch.isinf(iweights_triton)) and not torch.any( torch.isnan(iweights_triton) ) iweights_torch = awq_dequantize_torch(qweight, scales, zeros, group_size) torch.testing.assert_close(iweights_triton, iweights_torch) # input - [N, K] # qweight - [K, M // 8] # qzeros - [K // G, M // 8] # scales - [K // G, M] @pytest.mark.parametrize("N", [1, 2, 4, 8, 14, 17, 23, 32]) @pytest.mark.parametrize("K", [128]) @pytest.mark.parametrize("M", [16, 24, 32]) @pytest.mark.parametrize("group_size", AWQ_TRITON_SUPPORTED_GROUP_SIZES) @pytest.mark.parametrize("splitK", [1, 8]) def test_gemm(N, K, M, splitK, group_size): if group_size == -1: group_size = K split_k_iters = splitK input_rows = N input_cols = K input_dtype = torch.float32 qweight_rows = input_cols qweight_cols = M // 8 scales_rows = qweight_rows // group_size scales_cols = M scales_dtype = torch.float32 qzeros_rows = scales_rows qzeros_cols = qweight_cols current_platform.seed_everything(0) input = torch.rand((input_rows, input_cols), dtype=input_dtype, device=device) qweight = torch.randint( 0, torch.iinfo(torch.int32).max, 
(qweight_rows, qweight_cols), device=device ) qzeros = torch.randint( 0, torch.iinfo(torch.int32).max, (qzeros_rows, qzeros_cols), device=device ) scales = torch.rand((scales_rows, scales_cols), dtype=scales_dtype, device=device) output_triton = awq_gemm_triton(input, qweight, scales, qzeros, split_k_iters) assert not torch.any(torch.isinf(output_triton)) and not torch.any( torch.isnan(output_triton) ) dequantized_weights = awq_dequantize_triton(qweight, scales, qzeros) output_torch = torch.matmul(input, dequantized_weights) assert not torch.any(torch.isinf(output_torch)) and not torch.any( torch.isnan(output_torch) ) torch.testing.assert_close( output_triton.cpu(), output_torch.cpu(), atol=1e-1, rtol=1e-1 )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/quantization/test_allspark_gemm.py
tests/kernels/quantization/test_allspark_gemm.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import pytest import torch from tests.kernels.utils import DEFAULT_OPCHECK_TEST_UTILS, opcheck from vllm import _custom_ops as ops from vllm.model_executor.layers.quantization.utils.allspark_utils import ( ALLSPARK_AMPERE_K_ALIGN, ALLSPARK_AMPERE_M_CUBLAS_THRESHOLD, ALLSPARK_AMPERE_N_ALIGN, ) from vllm.model_executor.layers.quantization.utils.quant_utils import quantize_weights from vllm.platforms import current_platform from vllm.scalar_type import scalar_types def is_gptq_allspark_supported(min_capability: int, max_capability: int) -> bool: if not current_platform.is_cuda(): return False capability = current_platform.get_device_capability() assert capability is not None return ( capability.to_int() >= min_capability and capability.to_int() <= max_capability ) MNK_FACTORS = [ (1, 4, 8), (13, 17, 67), (26, 37, 13), (48, 16, 24), (67, 13, 88), (257, 13, 11), (658, 13, 11), (1033, 9, 17), ] DTYPES = [torch.float16, torch.bfloat16] HAS_ZP_OPTS = [False, True] def compute_max_diff(output, output_ref): return torch.mean(torch.abs(output - output_ref)) / torch.mean( torch.abs(output_ref) ) def rand_data(shape, dtype=torch.float16): return torch.randn(shape, dtype=dtype, device="cuda") @pytest.mark.skipif( not is_gptq_allspark_supported(80, 89), reason="AllSpark Ampere kernel is not supported on this GPU type.", ) @pytest.mark.parametrize("mnk_factors", MNK_FACTORS) @pytest.mark.parametrize("group_size", [-1]) @pytest.mark.parametrize("has_zp", HAS_ZP_OPTS) @pytest.mark.parametrize("dtype", DTYPES) def test_gptq_allspark_gemm_ampere(mnk_factors, group_size, has_zp, dtype): m_factor, n_factor, k_factor = mnk_factors m = m_factor n = n_factor * ALLSPARK_AMPERE_N_ALIGN k = k_factor * ALLSPARK_AMPERE_K_ALIGN input = rand_data((m, k), dtype=dtype) weight = rand_data((k, n), dtype=dtype) # Quantize (and apply act_order if provided) w_ref, qw, s, zp = quantize_weights( 
weight, scalar_types.uint8b128, group_size, has_zp ) qw = qw.to(torch.uint8) if has_zp: zp = zp.to(dtype) properties = torch.cuda.get_device_properties(qw.device.index) sm_count = properties.multi_processor_count sm_version = properties.major * 10 + properties.minor n_32align = (n + 32 - 1) // 32 * 32 qw_reorder, s_reorder, zp_reorder = ops.allspark_repack_weight(qw, s, zp, has_zp) opcheck( torch.ops._C.rearrange_kn_weight_as_n32k16_order, (qw, s, zp, has_zp, qw_reorder, s_reorder, zp_reorder, k, n, n_32align), ) opcheck( torch.ops._C.allspark_w8a16_gemm, ( input, qw_reorder, s_reorder, zp_reorder, n, group_size, sm_count, sm_version, ALLSPARK_AMPERE_M_CUBLAS_THRESHOLD, has_zp, True, ), test_utils=DEFAULT_OPCHECK_TEST_UTILS, ) output = ops.allspark_w8a16_gemm( input, qw_reorder, s_reorder, zp_reorder, n, group_size, sm_count, sm_version, ALLSPARK_AMPERE_M_CUBLAS_THRESHOLD, has_zp, True, ) output_ref = torch.matmul(input, w_ref) torch.cuda.synchronize() max_diff = compute_max_diff(output, output_ref) assert max_diff < 0.04
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/quantization/nvfp4_utils.py
tests/kernels/quantization/nvfp4_utils.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import torch from vllm._custom_ops import scaled_fp4_quant from vllm.scalar_type import scalar_types FLOAT4_E2M1_MAX = scalar_types.float4_e2m1f.max() FLOAT8_E4M3_MAX = torch.finfo(torch.float8_e4m3fn).max kE2M1ToFloat = torch.tensor( [0.0, 0.5, 1.0, 1.5, 2.0, 3.0, 4.0, 6.0], dtype=torch.float32 ) def convert_swizzled_to_linear(a_sf_swizzled: torch.Tensor, m, k, block_size): m_tiles = (m + 128 - 1) // 128 f = block_size * 4 k_tiles = (k + f - 1) // f tmp = torch.reshape(a_sf_swizzled, (1, m_tiles, k_tiles, 32, 4, 4)) tmp = torch.permute(tmp, (0, 1, 4, 3, 2, 5)) out = tmp.reshape(m_tiles * 128, k_tiles * f // block_size) return out[0:m, 0:k] def dequantize_nvfp4_to_dtype( tensor_fp4, tensor_sf, global_scale, dtype, device, block_size=16 ): """Dequantize the fp4 tensor back to high precision.""" # Two fp4 values are packed into one uint8. assert tensor_fp4.dtype == torch.uint8 m, packed_k = tensor_fp4.shape k = packed_k * 2 tensor_f32 = break_fp4_bytes(tensor_fp4, dtype) tensor_f32 = tensor_f32.reshape(m, k // block_size, block_size) tensor_sf = tensor_sf.view(torch.float8_e4m3fn) tensor_sf = convert_swizzled_to_linear(tensor_sf, m, k, block_size) tensor_sf_dtype = tensor_sf.to(torch.float32) / global_scale # scale the tensor out = (tensor_f32 * tensor_sf_dtype.unsqueeze(-1)).reshape(m, k) return out.to(dtype=dtype) def break_fp4_bytes(a, dtype): assert a.dtype == torch.uint8 m, n = a.shape # Vectorized nibble processing a_flat = a.flatten() high = (a_flat & 0xF0) >> 4 # Upper nibbles low = a_flat & 0x0F # Lower nibbles # Combine nibbles for batch processing combined = torch.stack((low, high), dim=1).flatten() # Vectorized sign and magnitude extraction signs = (combined & 0x08).to(torch.bool) # Sign bits abs_vals = (combined & 0x07).to(torch.long) # Magnitude indices # Device-aware lookup and sign application kE2M1 = kE2M1ToFloat.to(device=a.device) values = 
kE2M1[abs_vals] * torch.where(signs, -1.0, 1.0) # Reshape to final form return values.reshape(m, n * 2).to(dtype=dtype) def get_nvfp4_global_scale(a: torch.Tensor): return (FLOAT8_E4M3_MAX * FLOAT4_E2M1_MAX) / torch.abs(a).max().to(torch.float32) def quant_nvfp4_tensor(a: torch.Tensor): a_global_scale = get_nvfp4_global_scale(a) a_quant, a_block_scale = scaled_fp4_quant(a, a_global_scale) return a_quant, a_block_scale, a_global_scale
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/quantization/test_block_fp8.py
tests/kernels/quantization/test_block_fp8.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project # Adapted from https://github.com/sgl-project/sglang/pull/2575 import itertools import pytest import torch from tests.kernels.quant_utils import ( native_per_token_group_quant_fp8, native_w8a8_block_matmul, ) from vllm.config import VllmConfig from vllm.model_executor.layers.quantization.utils.fp8_utils import ( cutlass_scaled_mm, per_token_group_quant_fp8, w8a8_triton_block_scaled_mm, ) from vllm.platforms import current_platform from vllm.utils.deep_gemm import ( fp8_gemm_nt, get_col_major_tma_aligned_tensor, per_block_cast_to_fp8, should_use_deepgemm_for_fp8_linear, ) from vllm.utils.import_utils import has_deep_gemm if current_platform.get_device_capability() < (9, 0): pytest.skip("FP8 Triton requires CUDA 9.0 or higher", allow_module_level=True) vllm_config = VllmConfig() # Test configurations DTYPES = [torch.bfloat16] # [torch.half, torch.bfloat16, torch.float32] NUM_TOKENS = [7, 2050] D = [512, 4096, 5120, 13824] GROUP_SIZE = [64, 128, 512] M = [1, 7, 8, 83, 84, 4096] N = [128, 512, 7168, 7748, 13824] K = [256, 3884, 4096, 13824, 16384] # Deepseek-V3's intermediate size 18432, so N is 18432*2/8=4608 at TP8 # and its hidden size is 7168. 
BLOCK_SIZE = [[128, 128]] OUT_DTYPES = [torch.bfloat16] # [torch.float32, torch.half, torch.bfloat16] SEEDS = [0] # Skip all tests if CUDA is not available pytest.importorskip("torch.cuda") @pytest.fixture(autouse=True) def setup_cuda(): torch.set_default_device("cuda") @pytest.mark.skipif( current_platform.is_fp8_fnuz(), reason="This platform supports e4m3fnuz, not e4m3fn.", ) @pytest.mark.parametrize( "num_tokens,d,dtype,group_size,seed", itertools.product(NUM_TOKENS, D, DTYPES, GROUP_SIZE, SEEDS), ) @torch.inference_mode() def test_per_token_group_quant_fp8(num_tokens, d, dtype, group_size, seed): torch.manual_seed(seed) x = torch.rand(num_tokens, d, dtype=dtype) ref_out, ref_scale = native_per_token_group_quant_fp8(x, group_size) out, scale = per_token_group_quant_fp8(x, group_size) assert torch.allclose(out.to(torch.float32), ref_out.to(torch.float32), rtol=0.15) assert torch.allclose(scale, ref_scale) @pytest.mark.parametrize( "M,N,K,block_size,out_dtype,seed", itertools.product(M, N, K, BLOCK_SIZE, OUT_DTYPES, SEEDS), ) @torch.inference_mode() def test_w8a8_block_fp8_matmul(M, N, K, block_size, out_dtype, seed): torch.manual_seed(seed) factor_for_scale = 1e-2 fp8_info = torch.finfo(current_platform.fp8_dtype()) fp8_max, fp8_min = fp8_info.max, fp8_info.min A_fp32 = (torch.rand(M, K, dtype=torch.float32) - 0.5) * 2 * fp8_max A_fp8 = A_fp32.clamp(min=fp8_min, max=fp8_max).to(current_platform.fp8_dtype()) B_fp32 = (torch.rand(N, K, dtype=torch.float32) - 0.5) * 2 * fp8_max B_fp8 = B_fp32.clamp(min=fp8_min, max=fp8_max).to(current_platform.fp8_dtype()) block_n, block_k = block_size[0], block_size[1] n_tiles = (N + block_n - 1) // block_n k_tiles = (K + block_k - 1) // block_k As = torch.rand(M, k_tiles, dtype=torch.float32) * factor_for_scale Bs = torch.rand(n_tiles, k_tiles, dtype=torch.float32) * factor_for_scale ref_out = native_w8a8_block_matmul(A_fp8, B_fp8, As, Bs, block_size, out_dtype) out = w8a8_triton_block_scaled_mm(A_fp8, B_fp8, As, Bs, block_size, 
out_dtype) rel_diff = torch.mean( torch.abs(out.to(torch.float32) - ref_out.to(torch.float32)) ) / torch.mean(torch.abs(ref_out.to(torch.float32))) assert rel_diff < 0.001 @pytest.mark.skipif( not current_platform.is_cuda(), reason="CUTLASS only supported on CUDA platform." ) @torch.inference_mode() def test_w8a8_block_fp8_cutlass_matmul(): # Test simple case where weight.shape % 128 != 0, # like in DSV3 kv_a_proj_with_mqa M = 32 N = 576 K = 7168 block_size = [128, 128] out_dtype = torch.bfloat16 seed = 0 torch.manual_seed(seed) factor_for_scale = 1e-2 fp8_info = torch.finfo(torch.float8_e4m3fn) fp8_max, fp8_min = fp8_info.max, fp8_info.min A_fp32 = (torch.rand(M, K, dtype=torch.float32) - 0.5) * 2 * fp8_max B_fp32 = (torch.rand(N, K, dtype=torch.float32) - 0.5) * 2 * fp8_max B_fp8 = B_fp32.clamp(min=fp8_min, max=fp8_max).to(torch.float8_e4m3fn) block_n, block_k = block_size[0], block_size[1] n_tiles = (N + block_n - 1) // block_n k_tiles = (K + block_k - 1) // block_k Bs = torch.rand(n_tiles, k_tiles, dtype=torch.float32) * factor_for_scale # Hopper requires row-major format for scales Bs_cutlass = Bs.T.contiguous() if current_platform.is_device_capability(90) else Bs A_fp8, As = per_token_group_quant_fp8( A_fp32, block_size[1], column_major_scales=False ) # CUTLASS uses column-major format for scales A_fp8_cutlass, As_cutlass = per_token_group_quant_fp8( A_fp32, block_size[1], column_major_scales=True ) ref_out = native_w8a8_block_matmul(A_fp8, B_fp8, As, Bs, block_size, out_dtype) out = cutlass_scaled_mm( A_fp8_cutlass, B_fp8, As_cutlass, Bs_cutlass, block_size, out_dtype ) rel_diff = torch.mean( torch.abs(out.to(torch.float32) - ref_out.to(torch.float32)) ) / torch.mean(torch.abs(ref_out.to(torch.float32))) assert rel_diff < 0.001 @pytest.mark.skipif( current_platform.is_fp8_fnuz(), reason="This platform supports e4m3fnuz, not e4m3fn.", ) @pytest.mark.parametrize( "M,N,K,block_size,out_dtype,seed", itertools.product(M, N, K, BLOCK_SIZE, OUT_DTYPES, SEEDS), ) 
@pytest.mark.skipif(not has_deep_gemm(), reason="DeepGemm kernels not available.") @torch.inference_mode() def test_w8a8_block_fp8_deep_gemm_matmul(M, N, K, block_size, out_dtype, seed): torch.manual_seed(seed) fp8_info = torch.finfo(torch.float8_e4m3fn) fp8_max = fp8_info.max A_fp32 = (torch.rand(M, K, dtype=torch.float32) - 0.5) * 2 * fp8_max B_fp32 = (torch.rand(N, K, dtype=torch.float32) - 0.5) * 2 * fp8_max # only aligned sizes are supported by deepgemm if not should_use_deepgemm_for_fp8_linear( output_dtype=out_dtype, weight=B_fp32, supports_deep_gemm=True ): pytest.skip(f"Skipping test; invalid size {M}, {N}, {K}") A_fp8, As_fp8 = per_token_group_quant_fp8(A_fp32, block_size[1]) B_fp8, Bs_fp8 = per_block_cast_to_fp8(B_fp32, block_size=block_size) As = As_fp8.to(torch.float32) Bs = Bs_fp8.to(torch.float32) ref_out = native_w8a8_block_matmul(A_fp8, B_fp8, As, Bs, block_size, out_dtype) # Transpose earlier so that the testing will not trigger transposing kernels As_fp8 = get_col_major_tma_aligned_tensor(As_fp8) out = torch.zeros((M, N), device="cuda", dtype=out_dtype) assert As_fp8.shape == (M, (K + 127) // 128), ( f"{As_fp8.shape} != {(M, (K + 127) // 128)}" ) fp8_gemm_nt((A_fp8, As_fp8), (B_fp8, Bs_fp8), out) rel_diff = torch.mean( torch.abs(out.to(torch.float32) - ref_out.to(torch.float32)) ) / torch.mean(torch.abs(ref_out.to(torch.float32))) assert rel_diff < 0.001
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/quantization/test_fp8_quant_group.py
tests/kernels/quantization/test_fp8_quant_group.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """Tests for QuantFP8 Group Quantization implementation.""" import pytest import torch from vllm.model_executor.layers.quantization.input_quant_fp8 import QuantFP8 from vllm.model_executor.layers.quantization.utils.quant_utils import GroupShape from vllm.platforms import current_platform @pytest.mark.parametrize( "batch_size,hidden_dim,group_size", [ (16, 256, 32), # Small (64, 1024, 64), # Medium (128, 2048, 128), # Large (8, 513, 64), # Non-divisible (native only) ], ) @pytest.mark.parametrize("seed", [42]) @pytest.mark.parametrize("use_ue8m0", [True, False]) @torch.inference_mode() def test_quantfp8_group_functionality( batch_size: int, hidden_dim: int, group_size: int, seed: int, use_ue8m0: bool ) -> None: """Test QuantFP8 group quantization with various configurations. Tests both CUDA and native implementations, column-major scales, and verifies consistency between implementations. """ current_platform.seed_everything(seed) x = torch.randn((batch_size, hidden_dim), dtype=torch.bfloat16, device="cuda") * 8 expected_num_groups = (hidden_dim + group_size - 1) // group_size is_divisible = hidden_dim % group_size == 0 group_shape = GroupShape(1, group_size) quant_op = QuantFP8( static=False, group_shape=group_shape, column_major_scales=False, use_ue8m0=use_ue8m0, ) # 1. Test native implementation (always available) x_quant_native, scales_native = quant_op.forward_native(x.clone()) assert x_quant_native.shape == x.shape assert scales_native.shape == (batch_size, expected_num_groups) # 2. 
Test column-major scales configuration quant_op_col = QuantFP8( static=False, group_shape=group_shape, column_major_scales=True, use_ue8m0=use_ue8m0, ) _, scales_col = quant_op_col.forward_native(x.clone()) assert scales_col.shape == (batch_size, expected_num_groups) assert scales_col.stride(0) == 1 assert scales_col.stride(1) == batch_size # Test column-major scales consistency torch.testing.assert_close(scales_col, scales_native, rtol=1e-9, atol=1e-8) # 3. Test CUDA implementation (only for divisible dimensions) if is_divisible: x_quant_cuda, scales_cuda = quant_op.forward_cuda(x.clone()) assert x_quant_cuda.shape == x.shape assert scales_cuda.shape == (batch_size, expected_num_groups) # Verify CUDA/native consistency torch.testing.assert_close(scales_cuda, scales_native, rtol=2e-7, atol=2e-8) # Quantized values should mostly match diff_count = (x_quant_cuda != x_quant_native).sum().item() diff_ratio = diff_count / x_quant_cuda.numel() assert diff_ratio < 0.002, f"Too many differences: {diff_ratio:.4%}" @pytest.mark.parametrize("seed", [42]) @pytest.mark.parametrize("use_ue8m0", [True, False]) @torch.inference_mode() def test_quantfp8_group_multidimensional(seed: int, use_ue8m0: bool) -> None: current_platform.seed_everything(seed) group_size = 64 # Test with 3D input batch1, batch2, hidden_dim = 4, 8, 1024 x_3d = ( torch.randn((batch1, batch2, hidden_dim), dtype=torch.bfloat16, device="cuda") * 8 ) group_shape = GroupShape(1, group_size) quant_op = QuantFP8( static=False, group_shape=group_shape, column_major_scales=False, use_ue8m0=use_ue8m0, ) x_quant, scales = quant_op.forward_native(x_3d.clone()) assert x_quant.shape == x_3d.shape assert scales.shape == (batch1, batch2, hidden_dim // group_size) # Test column_major_scales with multi-dim quant_op_col = QuantFP8( static=False, group_shape=group_shape, column_major_scales=True, use_ue8m0=use_ue8m0, ) _, scales_col = quant_op_col.forward_native(x_3d.clone()) assert scales_col.shape == (batch1, batch2, hidden_dim 
// group_size) # Test with 4D input batch1, batch2, batch3, hidden_dim = 2, 3, 4, 256 x_4d = ( torch.randn( (batch1, batch2, batch3, hidden_dim), dtype=torch.bfloat16, device="cuda" ) * 8 ) x_quant_4d, scales_4d = quant_op.forward_native(x_4d.clone()) assert x_quant_4d.shape == x_4d.shape assert scales_4d.shape == (batch1, batch2, batch3, hidden_dim // group_size) _, scales_4d_col = quant_op_col.forward_native(x_4d.clone()) assert scales_4d_col.shape == (batch1, batch2, hidden_dim // group_size, batch3) @pytest.mark.parametrize("seed", [42]) @torch.inference_mode() def test_quantfp8_group_edge_cases(seed: int) -> None: current_platform.seed_everything(seed) batch_size = 16 group_size = 64 # Test with single group (group_size >= hidden_dim) x_small = torch.randn((batch_size, 32), dtype=torch.bfloat16, device="cuda") * 8 group_shape = GroupShape(1, group_size) quant_op = QuantFP8( static=False, group_shape=group_shape, column_major_scales=False ) x_quant_small, scales_small = quant_op.forward_native(x_small.clone()) assert x_quant_small.shape == x_small.shape assert scales_small.shape == (batch_size, 1) # Test with zero inputs x_zero = torch.zeros((batch_size, 256), dtype=torch.bfloat16, device="cuda") x_quant_zero, scales_zero = quant_op.forward_native(x_zero.clone()) assert x_quant_zero.shape == x_zero.shape assert (scales_zero > 0).all(), "Scales should be clamped to minimum" # Test very large values x_large = torch.full((batch_size, 256), 1000.0, dtype=torch.bfloat16, device="cuda") x_quant_large, scales_large = quant_op.forward_native(x_large.clone()) assert x_quant_large.shape == x_large.shape # FP8 max is typically 448 or 224, so scales should be > 1 assert (scales_large > 1.0).all(), "Large values should have scales > 1"
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/quantization/test_silu_mul_nvfp4_quant.py
tests/kernels/quantization/test_silu_mul_nvfp4_quant.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import pytest import torch from tests.kernels.quantization.nvfp4_utils import ( FLOAT4_E2M1_MAX, FLOAT8_E4M3_MAX, dequantize_nvfp4_to_dtype, ) from vllm._custom_ops import scaled_fp4_quant from vllm.model_executor.layers.activation import SiluAndMul from vllm.platforms import current_platform if not current_platform.has_device_capability(100): pytest.skip( reason="Nvfp4 Requires compute capability of 10 or above.", allow_module_level=True, ) FP4_DTYPE = torch.uint8 FP8_DTYPE = current_platform.fp8_dtype() DTYPES = [torch.float16, torch.bfloat16] SHAPES = [(128, 256), (128, 128), (256, 256), (256, 128)] BLOCK_SIZE = 16 @pytest.mark.parametrize("dtype", DTYPES) @pytest.mark.parametrize("shape", SHAPES) @torch.inference_mode() def test_silu_mul_nvfp4_quant( dtype: torch.dtype, shape: tuple[int, int], ) -> None: current_platform.seed_everything(42) device = "cuda:0" torch.set_default_device(device) x = torch.randn(shape, dtype=dtype) # ref op ref_output = SiluAndMul().forward_native(x) ref_global_scale = (FLOAT8_E4M3_MAX * FLOAT4_E2M1_MAX) / torch.abs( ref_output ).max().to(torch.float32) ref_output_quant, ref_block_scale = scaled_fp4_quant(ref_output, ref_global_scale) # fused op fused_output_quant = torch.empty_like(ref_output_quant) fused_block_scale = torch.empty_like(ref_block_scale) torch.ops._C.silu_and_mul_nvfp4_quant( fused_output_quant, fused_block_scale, x, ref_global_scale ) # check dtype assert ref_output_quant.dtype == FP4_DTYPE assert fused_output_quant.dtype == FP4_DTYPE assert ref_output_quant.shape == fused_output_quant.shape assert ref_block_scale.dtype == FP8_DTYPE assert fused_block_scale.dtype == FP8_DTYPE assert ref_block_scale.shape == fused_block_scale.shape # check dequantized output ref_output_dequant = dequantize_nvfp4_to_dtype( ref_output_quant, ref_block_scale, ref_global_scale, dtype, device ) fused_output_dequant = 
dequantize_nvfp4_to_dtype( fused_output_quant, fused_block_scale, ref_global_scale, dtype, device ) atol, rtol = 3e-1, 3e-1 torch.testing.assert_close( ref_output_dequant, fused_output_dequant, atol=atol, rtol=rtol )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/quantization/test_fp8_quant.py
tests/kernels/quantization/test_fp8_quant.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import pytest import torch import vllm._custom_ops as ops from tests.kernels.quant_utils import ( FP8_DTYPE, ref_dynamic_per_tensor_fp8_quant, ref_dynamic_per_token_quant, ) from tests.kernels.utils import opcheck from vllm.platforms import current_platform DTYPES = [torch.bfloat16, torch.float] HIDDEN_SIZES = [17, 1024, 1025, 1026, 5137, 8193] NUM_TOKENS = [1, 7, 4096] SCALE_UBS = [True, False] SEEDS = [0] def opcheck_fp8_quant( output, input, scale=None, scale_ub=None, use_per_token_if_dynamic=False ): if scale is not None: opcheck(torch.ops._C.static_scaled_fp8_quant, (output, input, scale)) elif use_per_token_if_dynamic: scale = torch.empty( (input.shape[0], 1), device=input.device, dtype=torch.float32 ) opcheck( torch.ops._C.dynamic_per_token_scaled_fp8_quant, (output, input, scale, scale_ub), ) else: scale = torch.empty( (input.numel() // input.shape[-1], 1), device=input.device, dtype=torch.float32, ) opcheck(torch.ops._C.dynamic_scaled_fp8_quant, (output, input, scale)) @pytest.mark.parametrize("num_tokens", NUM_TOKENS) @pytest.mark.parametrize("hidden_size", HIDDEN_SIZES) @pytest.mark.parametrize("dtype", DTYPES) @pytest.mark.parametrize("scale_ub", SCALE_UBS) @pytest.mark.parametrize("seed", SEEDS) @torch.inference_mode() def test_dynamic_per_token_fp8_quant( num_tokens: int, hidden_size: int, dtype: torch.dtype, scale_ub: bool, seed: int ) -> None: current_platform.seed_everything(seed) x = ( torch.rand(num_tokens, hidden_size, dtype=dtype, device="cuda") + 1e-6 ) # avoid nans scale_ub = ( torch.mean(x).to(dtype=torch.float32, device="cuda") if scale_ub else None ) ref_out, ref_scales = ref_dynamic_per_token_quant(x, FP8_DTYPE, scale_ub) ops_out, ops_scales = ops.scaled_fp8_quant( x, scale_ub=scale_ub, use_per_token_if_dynamic=True ) torch.testing.assert_close(ref_scales, ops_scales) torch.testing.assert_close( ref_out.to(dtype=torch.float32), 
ops_out.to(dtype=torch.float32) ) opcheck_fp8_quant(ops_out, x, None, scale_ub, use_per_token_if_dynamic=True) @pytest.mark.parametrize("num_tokens", NUM_TOKENS) @pytest.mark.parametrize("hidden_size", HIDDEN_SIZES) @pytest.mark.parametrize("dtype", DTYPES) @pytest.mark.parametrize("seed", SEEDS) @torch.inference_mode() def test_dynamic_per_tensor_fp8_quant( num_tokens: int, hidden_size: int, dtype: torch.dtype, seed: int ) -> None: current_platform.seed_everything(seed) x = torch.rand(num_tokens, hidden_size, dtype=dtype, device="cuda") ref_out, ref_scale = ref_dynamic_per_tensor_fp8_quant(x) ops_out, ops_scale = ops.scaled_fp8_quant(x) torch.testing.assert_close(ref_scale, ops_scale) torch.testing.assert_close( ref_out.to(dtype=torch.float32), ops_out.to(dtype=torch.float32) ) opcheck_fp8_quant(ops_out, x) # Regression test for a case with large activations where an int32 index cannot # represent the number of elements. @torch.inference_mode() @pytest.mark.parametrize("seed", SEEDS) def test_fp8_quant_large(seed: int) -> None: current_platform.seed_everything(seed) num_tokens = 1024000 # Mistral-Nemo's max_position_embeddings hidden_size = 1152 # Smallest hidden_size to reproduce the error dtype = torch.bfloat16 x = torch.rand(num_tokens, hidden_size, dtype=dtype, device="cuda") ref_out, scale = ref_dynamic_per_tensor_fp8_quant(x) ops_out, _ = ops.scaled_fp8_quant(x, scale) # Minimize memory footprint in this test by freeing x and upconverting # the outputs in place. (torch.allclose does not support fp8) del x ref_out = ref_out.to(dtype=dtype) ops_out = ops_out.to(dtype=dtype) torch.testing.assert_close(ref_out, ops_out)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/quantization/test_int8_quant.py
tests/kernels/quantization/test_int8_quant.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import pytest import torch from tests.kernels.quant_utils import ref_dynamic_per_token_quant from tests.kernels.utils import opcheck from vllm._custom_ops import scaled_int8_quant from vllm.platforms import current_platform DTYPES = [torch.bfloat16, torch.float] HIDDEN_SIZES = [17, 1024, 1025, 1026, 5137, 8193] NUM_TOKENS = [1, 7, 4096] SEEDS = [0] SCALE = [0.1, 2.1] def opcheck_int8_quant_static(output, input, scale, azp=None): if azp is None: opcheck(torch.ops._C.static_scaled_int8_quant, (output, input, scale, None)) else: opcheck(torch.ops._C.static_scaled_int8_quant, (output, input, scale, azp)) def opcheck_int8_quant_dynamic(output, input, symmetric=True): scale = torch.empty( (input.numel() // input.shape[-1], 1), device=input.device, dtype=torch.float32 ) if symmetric: opcheck(torch.ops._C.dynamic_scaled_int8_quant, (output, input, scale, None)) else: azp = torch.empty( (input.numel() // input.shape[-1], 1), device=input.device, dtype=torch.int32, ) opcheck(torch.ops._C.dynamic_scaled_int8_quant, (output, input, scale, azp)) @pytest.mark.parametrize("num_tokens", NUM_TOKENS) @pytest.mark.parametrize("hidden_size", HIDDEN_SIZES) @pytest.mark.parametrize("dtype", DTYPES) @pytest.mark.parametrize("seed", SEEDS) @torch.inference_mode() def test_dynamic_scaled_int8_quant( num_tokens: int, hidden_size: int, dtype: torch.dtype, seed: int ) -> None: current_platform.seed_everything(seed) x = torch.rand(num_tokens, hidden_size, dtype=dtype, device="cuda") * 1000 # reference ref_out, ref_scales = ref_dynamic_per_token_quant(x, torch.int8) # kernel ops_out, ops_scales, _ = scaled_int8_quant(x) torch.testing.assert_close(ops_scales, ref_scales) # big atol to account for rounding errors torch.testing.assert_close(ops_out, ref_out, atol=1, rtol=0.0) opcheck_int8_quant_dynamic(ops_out, x) @pytest.mark.parametrize("num_tokens", NUM_TOKENS) 
@pytest.mark.parametrize("hidden_size", HIDDEN_SIZES) @pytest.mark.parametrize("dtype", DTYPES) @pytest.mark.parametrize("seed", SEEDS) @torch.inference_mode() def test_dynamic_scaled_int8_azp_quant( num_tokens: int, hidden_size: int, dtype: torch.dtype, seed: int ) -> None: current_platform.seed_everything(seed) int8_traits = torch.iinfo(torch.int8) x = torch.rand(num_tokens, hidden_size, dtype=dtype, device="cuda") * 1000 - 300 x_token_max, _ = x.to(dtype=torch.float32).max(dim=1, keepdim=True) x_token_min, _ = x.to(dtype=torch.float32).min(dim=1, keepdim=True) # calculate scale and azp, and adjust the range scales = (x_token_max - x_token_min) / torch.tensor(255.0) azps = torch.round(torch.tensor(-128.0) - x_token_min / scales).to(torch.int32) torch_out = ( ((x / scales).round() + azps) .clamp(int8_traits.min, int8_traits.max) .to(torch.int8) ) assert torch_out.min() >= int8_traits.min and torch_out.max() <= int8_traits.max ops_out, scales_out, azp_out = scaled_int8_quant(x, symmetric=False) if not torch.allclose(scales_out, scales): print(torch.argmax(torch.abs(scales_out - scales))) torch.testing.assert_close(scales_out, scales) # big atol to account for rounding errors torch.testing.assert_close(azp_out, azps, atol=1, rtol=0.0) # if AZP is off by 1, after rounding-to-even, the output may be off by 2 torch.testing.assert_close(ops_out, torch_out, atol=2, rtol=0.0) opcheck_int8_quant_dynamic(ops_out, x, False) @pytest.mark.parametrize("num_tokens", NUM_TOKENS) @pytest.mark.parametrize("hidden_size", HIDDEN_SIZES) @pytest.mark.parametrize("dtype", DTYPES) @pytest.mark.parametrize("seed", SEEDS) @pytest.mark.parametrize("scale", SCALE) @torch.inference_mode() def test_static_scaled_int8_quant( num_tokens: int, hidden_size: int, dtype: torch.dtype, seed: int, scale: float ) -> None: current_platform.seed_everything(seed) int8_traits = torch.iinfo(torch.int8) x = torch.rand(num_tokens, hidden_size, dtype=dtype, device="cuda") * 1000 scale_arg = 
torch.tensor([scale], dtype=torch.float32, device="cuda") out1 = ( (x / scale_arg).round().clamp(int8_traits.min, int8_traits.max).to(torch.int8) ) out2, scale2, _ = scaled_int8_quant(x, scale_arg) assert scale2 is scale_arg # big atol to account for rounding errors torch.testing.assert_close(out1, out2, atol=1, rtol=0.0) opcheck_int8_quant_static(out2, x, scale_arg) @pytest.mark.parametrize("num_tokens", NUM_TOKENS) @pytest.mark.parametrize("hidden_size", HIDDEN_SIZES) @pytest.mark.parametrize("dtype", DTYPES) @pytest.mark.parametrize("seed", SEEDS) @pytest.mark.parametrize("scale", SCALE) @pytest.mark.parametrize("azp", [-255, 54]) @torch.inference_mode() def test_static_scaled_int8_azp_quant( num_tokens: int, hidden_size: int, dtype: torch.dtype, seed: int, scale: float, azp: int, ) -> None: current_platform.seed_everything(seed) int8_traits = torch.iinfo(torch.int8) x = torch.rand(num_tokens, hidden_size, dtype=dtype, device="cuda") * 1000 - 300 out1 = ( ((x / scale).round() + azp) .clamp(int8_traits.min, int8_traits.max) .to(torch.int8) ) scale_arg = torch.tensor([scale], dtype=torch.float32, device="cuda") azp_arg = torch.tensor([azp], dtype=torch.int32, device="cuda") out2, scale2, azp2 = scaled_int8_quant(x, scale_arg, azp_arg, symmetric=False) assert scale2 is scale_arg assert azp2 is azp_arg # big atol to account for rounding errors torch.testing.assert_close(out1, out2, atol=1, rtol=0.0) opcheck_int8_quant_static(out2, x, scale_arg, azp_arg) @pytest.mark.parametrize("is_max", [True, False]) @torch.inference_mode() def test_static_scaled_int8_azp_quant_saturating_cast(is_max: bool) -> None: # Test that the saturating cast works correctly for values near i32 max/min from numpy import inf, nextafter int32_traits = torch.iinfo(torch.int32) val = float(int32_traits.max if is_max else int32_traits.min) x_vals = [[nextafter(val, inf), val + 1, val, val - 1, nextafter(val, -inf)]] x = torch.tensor(x_vals, dtype=torch.float32, device="cuda") # The calculation in 
the kernel is: cast<int8>(cast<int32>(x / scale) + azp) # where cast<T> is a saturating cast to type T. # Scale is set to 1.0 so that the input values are the ones that are cast. # AZP is set to 0 to make sure the int8 saturating cast is tested as well. scale = torch.scalar_tensor(1.0, dtype=torch.float32, device="cuda") azp = torch.scalar_tensor(0, dtype=torch.int32, device="cuda") int8_traits = torch.iinfo(torch.int8) val_i8 = int8_traits.max if is_max else int8_traits.min expected = torch.full((1, 5), val_i8, dtype=torch.int8, device="cuda") out, _, _ = scaled_int8_quant(x, scale, azp, symmetric=False) torch.testing.assert_close(expected, out, atol=0, rtol=0)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/quantization/test_hadacore.py
tests/kernels/quantization/test_hadacore.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import math import pytest import torch from compressed_tensors.transform import deterministic_hadamard_matrix from vllm import _custom_ops as ops from vllm.platforms import current_platform if current_platform.is_rocm(): pytest.skip( "These tests require hadacore_transform, not supported on ROCm.", allow_module_level=True, ) @pytest.mark.parametrize("batch_size", [1, 32]) @pytest.mark.parametrize("hidden_dim", [2**n for n in range(10)]) def test_hadacore(batch_size, hidden_dim, dtype=torch.bfloat16, device="cuda"): x = torch.eye(hidden_dim, dtype=dtype, device=device) hadamard = deterministic_hadamard_matrix( hidden_dim, dtype=torch.float64, device="cuda" ) / math.sqrt(hidden_dim) y = ops.hadacore_transform(x.clone()) y_true = (x.to(hadamard.dtype) @ hadamard.T).to(y.dtype) assert torch.allclose(y, y_true) y = ops.hadacore_transform(y) assert torch.allclose(y, x)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/quantization/test_rocm_skinny_gemms.py
tests/kernels/quantization/test_rocm_skinny_gemms.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import math import pytest import torch import vllm._custom_ops as ops from tests.kernels.quant_utils import ref_dynamic_per_tensor_fp8_quant from vllm.platforms import current_platform from vllm.utils.platform_utils import get_cu_count DTYPES = [torch.bfloat16, torch.float16] # Specific (N, K, M) combinations for targeted testing NKM_FACTORS_LLMM1 = [ # Small, medium, large cases (1, 8, 16), (1, 32, 64), (1, 128, 256), (1, 512, 1024), (1, 2048, 4096), # Edge cases with specific K sizes (1, 6144, 1024), (1, 8192, 2048), # Very large case (1, 4096, 8192), ] NKM_FACTORS_WVSPLITK = [ # Different batch sizes with key dimensions (1, 16, 16), (1, 64, 64), (2, 256, 256), (3, 1024, 1024), (4, 4096, 4096), # Extended K values (1, 9216, 512), (2, 10240, 1024), (4, 16384, 8192), # Minimum M constraint validation (m >= 8) (1, 64, 8), (2, 128, 8), (4, 256, 8), ] NKM_FACTORS_WVSPLITK_FP8 = [ # FP8-specific cases with K % 16 == 0 (1, 16, 16), (1, 64, 64), (2, 512, 512), (3, 2048, 2048), (4, 4096, 4096), (4, 16400, 2048), # Extended FP8 dimensions not covered by WVSPLITK (1, 14336, 1024), (2, 24576, 2048), (4, 32768, 28672), ] SEEDS = [0] @pytest.mark.parametrize("n,k,m", NKM_FACTORS_LLMM1) @pytest.mark.parametrize("dtype", DTYPES) @pytest.mark.parametrize("rows_per_block", [2, 4, 8, 16]) @pytest.mark.parametrize("seed", SEEDS) @pytest.mark.skipif(not current_platform.is_rocm(), reason="only test for rocm") @torch.inference_mode() def test_rocm_llmm1_kernel(n, k, m, dtype, rows_per_block, seed): torch.manual_seed(seed) # TODO: Zero-centering the inputs causes errors for LLMM1! # Without that the numbers quickly saturate, and may # be giving false matches. 
A = torch.rand(n, k, dtype=dtype, device="cuda") B = torch.rand(m, k, dtype=dtype, device="cuda") ref_out = torch.matmul(A, B.t()) out = ops.LLMM1(B, A, rows_per_block) assert torch.allclose(out, ref_out, rtol=0.01) @pytest.mark.parametrize("n,k,m", NKM_FACTORS_WVSPLITK) @pytest.mark.parametrize("dtype", DTYPES) @pytest.mark.parametrize("seed", SEEDS) @pytest.mark.skipif(not current_platform.is_rocm(), reason="only test for rocm") def test_rocm_wvsplitk_kernel(n, k, m, dtype, seed): torch.manual_seed(seed) cu_count = get_cu_count() A = torch.rand(n, k, dtype=dtype, device="cuda") - 0.5 B = torch.rand(m, k, dtype=dtype, device="cuda") - 0.5 ref_out = torch.nn.functional.linear(A, B) out = ops.wvSplitK(B, A.view(-1, A.size(-1)), cu_count) assert torch.allclose(out, ref_out, rtol=0.01) @pytest.mark.parametrize("n,k,m", NKM_FACTORS_WVSPLITK) @pytest.mark.parametrize("dtype", DTYPES) @pytest.mark.parametrize("seed", SEEDS) @pytest.mark.skipif(not current_platform.is_rocm(), reason="only test for rocm") def test_rocm_wvsplitk_bias1D_kernel(n, k, m, dtype, seed): torch.manual_seed(seed) cu_count = get_cu_count() xavier = math.sqrt(2 / k) # normalize to avoid large output-bias deltas A = (torch.rand(n, k, dtype=dtype, device="cuda") - 0.5) * xavier B = (torch.rand(m, k, dtype=dtype, device="cuda") - 0.5) * xavier BIAS = torch.rand(m, dtype=dtype, device="cuda") - 0.5 ref_out = torch.nn.functional.linear(A, B, BIAS) out = ops.wvSplitK(B, A.view(-1, A.size(-1)), cu_count, BIAS) assert torch.allclose(out, ref_out, rtol=0.01) @pytest.mark.parametrize("n,k,m", NKM_FACTORS_WVSPLITK) @pytest.mark.parametrize("dtype", DTYPES) @pytest.mark.parametrize("seed", SEEDS) @pytest.mark.skipif(not current_platform.is_rocm(), reason="only test for rocm") def test_rocm_wvsplitk_bias2D_kernel(n, k, m, dtype, seed): torch.manual_seed(seed) cu_count = get_cu_count() xavier = math.sqrt(2 / k) # normalize to avoid large output-bias deltas A = (torch.rand(n, k, dtype=dtype, device="cuda") - 0.5) * 
xavier B = (torch.rand(m, k, dtype=dtype, device="cuda") - 0.5) * xavier BIAS = torch.rand(n, m, dtype=dtype, device="cuda") - 0.5 ref_out = torch.nn.functional.linear(A, B, BIAS) out = ops.wvSplitK(B, A.view(-1, A.size(-1)), cu_count, BIAS) assert torch.allclose(out, ref_out, rtol=0.01) @pytest.mark.parametrize("n,k,m", NKM_FACTORS_WVSPLITK_FP8) @pytest.mark.parametrize("dtype", DTYPES) @pytest.mark.parametrize("seed", SEEDS) @pytest.mark.skipif( not (current_platform.is_rocm() and current_platform.supports_fp8()), reason="only test for rocm fp8", ) def test_rocm_wvsplitk_fp8_kernel(n, k, m, dtype, seed): torch.manual_seed(seed) A = torch.rand(n, k, device="cuda") - 0.5 B = torch.rand(m, k, device="cuda") - 0.5 A, scale_a = ref_dynamic_per_tensor_fp8_quant(A) B, scale_b = ref_dynamic_per_tensor_fp8_quant(B) ref_out = torch._scaled_mm( A, B.t(), out_dtype=dtype, scale_a=scale_a, scale_b=scale_b ) out = ops.wvSplitKQ( B, A, dtype, scale_a, scale_b, get_cu_count(), ) assert torch.allclose(out, ref_out, rtol=0.01) @pytest.mark.parametrize("n,k,m", NKM_FACTORS_WVSPLITK_FP8) @pytest.mark.parametrize("dtype", DTYPES) @pytest.mark.parametrize("seed", SEEDS) @pytest.mark.skipif( not (current_platform.is_rocm() and current_platform.supports_fp8()), reason="only test for rocm fp8", ) def test_rocm_wvsplitk_fp8_bias1D_kernel(n, k, m, dtype, seed): torch.manual_seed(seed) xavier = math.sqrt(2 / k) # normalize to avoid large output-bias deltas A = (torch.rand(n, k, device="cuda") - 0.5) * xavier B = (torch.rand(m, k, device="cuda") - 0.5) * xavier BIAS = torch.rand(m, dtype=dtype, device="cuda") - 0.5 A, scale_a = ref_dynamic_per_tensor_fp8_quant(A) B, scale_b = ref_dynamic_per_tensor_fp8_quant(B) ref_out = torch._scaled_mm( A, B.t(), out_dtype=dtype, scale_a=scale_a, scale_b=scale_b, bias=BIAS ) out = ops.wvSplitKQ( B, A, dtype, scale_a, scale_b, get_cu_count(), BIAS, ) assert torch.allclose(out, ref_out, rtol=0.01)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/quantization/test_nvfp4_qutlass.py
tests/kernels/quantization/test_nvfp4_qutlass.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project # # Copyright (C) 2025 Roberto L. Castro (Roberto.LopezCastro@ist.ac.at). # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import numpy as np import pytest import torch from compressed_tensors.transform.utils.hadamard import deterministic_hadamard_matrix from vllm import _custom_ops as ops # use existing nvfp4 gemm in vllm from vllm._custom_ops import fusedQuantizeNv from vllm.model_executor.layers.quantization.qutlass_utils import to_blocked from vllm.platforms import current_platform if not torch.cuda.is_available(): pytest.skip("CUDA required for these tests.", allow_module_level=True) if not ( current_platform.has_device_capability(100) or current_platform.has_device_capability(120) ): pytest.skip( reason="Tests require compute capability 10.0 (100) or 12.0 (120).", allow_module_level=True, ) # ----- Helpers ----- def get_hadamard_matrix(group_size: int, dtype: torch.dtype, device: torch.device): return ( deterministic_hadamard_matrix(group_size, dtype=dtype, device=device) * group_size**-0.5 ) def _rtne_fp4(x: torch.Tensor): device = x.device grid = torch.tensor( [ -6.0, -4.0, -3.0, -2.0, -1.5, -1.0, -0.5, -0.0, 0.0, 0.5, 1.0, 1.5, 2.0, 3.0, 4.0, 6.0, ], dtype=x.dtype, device=x.device, ) grid_int = torch.tensor( [-1, -2, -3, -4, -5, -6, -7, -8, 0, 1, 2, 3, 4, 5, 6, 7], dtype=torch.uint8, device=device, ) inds = torch.bucketize(x, grid) lo, hi = 
(inds - 1).clamp(min=0, max=15), inds.clamp(min=0, max=15) g_lo, g_hi = grid[lo], grid[hi] pick_hi = (g_hi - x < x - g_lo) | (g_hi - x == x - g_lo) & (grid_int[hi] % 2 == 0) y = torch.where(pick_hi, g_hi, g_lo) y_int = torch.where(pick_hi, grid_int[hi], grid_int[lo]) y_int_packed = (y_int[..., 1::2] & 0xF) << 4 | y_int[..., ::2] & 0xF return y, y_int_packed def _dq_fp4(x_e2m1: torch.Tensor, x_e4m3: torch.Tensor, alpha: float): device = x_e2m1.device x_e2m1_i32 = x_e2m1.view(dtype=torch.uint8).to(dtype=torch.int32) x_e2m1_unpacked = torch.stack( [x_e2m1_i32 & 0xF, (x_e2m1_i32 >> 4) & 0xF], dim=-1 ).flatten(start_dim=-2) grid_dq = torch.tensor( [ 0.0, 0.5, 1.0, 1.5, 2.0, 3.0, 4.0, 6.0, -0.0, -0.5, -1.0, -1.5, -2.0, -3.0, -4.0, -6.0, ], dtype=torch.float64, device=device, ) x_fp4_dq = grid_dq[x_e2m1_unpacked] scales_dq = x_e4m3.to(torch.float64) x_dq = (x_fp4_dq.unflatten(dim=-1, sizes=(-1, 16)) * scales_dq[..., None]).flatten( start_dim=-2 ) / alpha # * (4. / 3.) return x_dq, x_fp4_dq, scales_dq def _unpack_mask(clip_mask: torch.Tensor) -> torch.Tensor: clip_mask_unpacked_dq = torch.zeros( *clip_mask.shape[:-1], clip_mask.size(-1) * 8, dtype=torch.bool, device=clip_mask.device, ) for i in range(8): clip_mask_unpacked_dq[..., i::8] = (clip_mask >> i) & 1 return clip_mask_unpacked_dq def _forward_quantize_ref(x: torch.Tensor, h: torch.Tensor, rot_size: int): device = x.device xh_ref64 = ( x.unflatten(dim=-1, sizes=(-1, rot_size)).to(dtype=torch.float64) @ h.reshape(rot_size, rot_size).to(dtype=torch.float64) ).flatten(start_dim=-2) abs_max = xh_ref64.unflatten(dim=-1, sizes=(-1, 16)).abs().amax(dim=-1) scales_ref64_ = abs_max + 1e-8 xh_e4m3_ref = scales_ref64_.to(dtype=torch.float8_e4m3fn) scales_ref64 = xh_e4m3_ref.to(dtype=torch.float64) xh_scaled_ref64 = ( xh_ref64.unflatten(dim=-1, sizes=(-1, 16)) / scales_ref64[..., None] ).flatten(start_dim=-2) xh_scaled_ref64 *= 6.0 clip_mask_unpacked_ref = xh_scaled_ref64.abs() < 6.0 clip_mask_ref = torch.zeros( *x.shape[:-1], 
x.size(-1) // 8, dtype=torch.uint8, device=device ) for i in range(8): clip_mask_ref |= clip_mask_unpacked_ref[..., i::8].to(dtype=torch.uint8) << i xh_fp4_ref, xh_e2m1_ref = _rtne_fp4(xh_scaled_ref64) xh_dq, xh_fp4_dq, scales_dq = _dq_fp4(xh_e2m1_ref, xh_e4m3_ref, 6.0) clip_mask_unpacked_dq = _unpack_mask(clip_mask_ref) assert xh_fp4_dq.equal(xh_fp4_ref) assert scales_dq.equal(scales_ref64) assert clip_mask_unpacked_dq.equal(clip_mask_unpacked_ref) return ( xh_dq, clip_mask_unpacked_ref, (xh_e2m1_ref, xh_e4m3_ref, clip_mask_ref), ) DTYPE = torch.bfloat16 DEVICE = torch.device("cuda:0") ROT_SIZES = [16, 32, 64, 128] GLOBAL_SCALES = [6.0] LLAMA_MODELS = { "7B": [(4096, 3 * 4096), (4096, 4096), (4096, 2 * 10752), (10752, 4096)], "13B": [(5120, 3 * 5120), (5120, 5120), (5120, 2 * 13568), (13568, 5120)], "33B": [(6656, 3 * 6656), (6656, 6656), (6656, 2 * 17664), (17664, 6656)], "70B": [(8192, 3 * 8192), (8192, 8192), (8192, 2 * 21760), (21760, 8192)], } @pytest.fixture(autouse=True) def _seed_each_test(): current_platform.seed_everything(0) np.random.seed(0) torch.random.manual_seed(0) @pytest.mark.parametrize("rot_size", ROT_SIZES) @pytest.mark.parametrize("global_scale_value", GLOBAL_SCALES) @torch.inference_mode() def test_fused_quantization(rot_size: int, global_scale_value: float): dtype, device = DTYPE, DEVICE h = get_hadamard_matrix(rot_size, dtype, device) x = torch.randn(2, 4096, 4096, dtype=dtype, device=device) * 25.0 global_scale = torch.tensor([global_scale_value], device=device) xh_dq_ref, _, _ = _forward_quantize_ref(x, h, rot_size) xh_e2m1, xh_e4m3 = fusedQuantizeNv(x, h, global_scale) xh_e4m3 = xh_e4m3.reshape(2, 4096, 4096 // 16) xh_dq, *_ = _dq_fp4(xh_e2m1, xh_e4m3, alpha=global_scale_value) torch.testing.assert_close(xh_dq, xh_dq_ref, rtol=0.34, atol=100) assert (xh_dq != xh_dq_ref).float().mean() <= 1e-1 m, n, k = 504, 4096 * 2, 4096 a = torch.randn(m, k, dtype=dtype, device=device) * 25.0 b = torch.randn(n, k, dtype=dtype, device=device) * 25.0 
a_e2m1, a_e4m3 = fusedQuantizeNv(a, h, global_scale) b_e2m1, b_e4m3 = fusedQuantizeNv(b, h, global_scale) a_dq, *_ = _dq_fp4(a_e2m1, a_e4m3[:m, :k], alpha=1.0) b_dq, *_ = _dq_fp4(b_e2m1, b_e4m3[:n, :k], alpha=1.0) out_ref = a_dq @ b_dq.transpose(-2, -1) a_scale_block = to_blocked(a_e4m3, backend="triton").view(-1, k // 16) b_scale_block = to_blocked(b_e4m3, backend="triton").view(-1, k // 16) alpha = torch.tensor([1.0], device=device) out = ops.cutlass_scaled_fp4_mm( a_e2m1, b_e2m1, a_scale_block, b_scale_block, alpha, torch.bfloat16 ) assert out.equal(out_ref.to(dtype=out.dtype)) @pytest.mark.parametrize("model", list(LLAMA_MODELS.keys())) @pytest.mark.parametrize("layer_idx", [0, 1, 2, 3]) @pytest.mark.parametrize("batch", [1, 16]) @pytest.mark.parametrize("rot_size", ROT_SIZES) @torch.inference_mode() def test_llama_shapes(model: str, layer_idx: int, batch: int, rot_size: int): dtype, device = DTYPE, DEVICE m = batch k, n = LLAMA_MODELS[model][layer_idx] h = get_hadamard_matrix(rot_size, dtype, device) a = torch.randn(m, k, dtype=dtype, device=device) * 25.0 b = torch.randn(n, k, dtype=dtype, device=device) * 25.0 global_scale = torch.tensor([1.0], device=device) a_e2m1, a_e4m3 = fusedQuantizeNv(a, h, global_scale) b_e2m1, b_e4m3 = fusedQuantizeNv(b, h, global_scale) a_dq, *_ = _dq_fp4(a_e2m1, a_e4m3[:m, :k], alpha=1.0) b_dq, *_ = _dq_fp4(b_e2m1, b_e4m3[:n, :k], alpha=1.0) out_ref = a_dq @ b_dq.transpose(-2, -1) a_scale_block = to_blocked(a_e4m3, backend="triton").view(-1, k // 16) b_scale_block = to_blocked(b_e4m3, backend="triton").view(-1, k // 16) alpha = torch.tensor([1.0], device=device) out = ops.cutlass_scaled_fp4_mm( a_e2m1, b_e2m1, a_scale_block, b_scale_block, alpha, torch.bfloat16 ) assert out.equal(out_ref.to(dtype=out.dtype))
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/quantization/test_ggml.py
tests/kernels/quantization/test_ggml.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import gguf import pytest import torch from tests.kernels.utils import opcheck from vllm import _custom_ops as ops # noqa: F401 @pytest.mark.parametrize("quant_type", [12]) def test_ggml_opcheck(quant_type): block_size, type_size = gguf.GGML_QUANT_SIZES[quant_type] shape = [256, 1152] qweight = torch.randint(0, 100, shape, device="cuda", dtype=torch.uint8) m = qweight.shape[0] n = qweight.shape[1] // type_size * block_size opcheck(torch.ops._C.ggml_dequantize, (qweight, quant_type, m, n, torch.float16)) x = torch.rand((m, 512), device="cuda", dtype=torch.float16) opcheck(torch.ops._C.ggml_mul_mat_a8, (qweight, x, quant_type, qweight.shape[0])) opcheck( torch.ops._C.ggml_mul_mat_vec_a8, (qweight, x, quant_type, qweight.shape[0]) ) shape = [256, 1024, 336] qweight = torch.randint(0, 100, shape, device="cuda", dtype=torch.uint8) x = torch.rand((1, 1024), device="cuda", dtype=torch.float16) sorted_token_ids = torch.arange(776, device="cuda") expert_ids = torch.randint(0, 256, (194,), device="cuda") num_tokens_post_padded = torch.tensor([1], dtype=torch.int64, device="cuda") opcheck( torch.ops._C.ggml_moe_a8, ( x, qweight, sorted_token_ids, expert_ids, num_tokens_post_padded, quant_type, qweight.shape[0], 1, x.shape[0], ), ) topk_ids = torch.zeros((1, 1), device="cuda", dtype=torch.int32) opcheck( torch.ops._C.ggml_moe_a8_vec, (x, qweight, topk_ids, 1, quant_type, qweight.shape[0], x.shape[0]), )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/quantization/test_block_int8.py
tests/kernels/quantization/test_block_int8.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project # Adapted from https://github.com/sgl-project/sglang/blob/main/test/srt/test_block_int8.py import itertools import pytest import torch from tests.kernels.quant_utils import native_w8a8_block_matmul from vllm.config import VllmConfig from vllm.model_executor.layers.quantization.utils.int8_utils import ( w8a8_block_int8_matmul, ) from vllm.platforms import current_platform if current_platform.get_device_capability() < (7, 0): pytest.skip("INT8 Triton requires CUDA 7.0 or higher", allow_module_level=True) vllm_config = VllmConfig() DTYPES = [torch.half, torch.bfloat16] M = [1, 33, 64, 222] N = [128, 1024] K = [256, 4096] # BLOCK_SIZE = [[64, 64], [64, 128], [128, 64], [128, 128]] BLOCK_SIZE = [[128, 128]] SEEDS = [0] @pytest.fixture(autouse=True, scope="module") def setup_cuda(): """Sets the default CUDA device for all tests in this module.""" torch.set_default_device("cuda") @pytest.mark.parametrize( "M,N,K,block_size,out_dtype,seed", itertools.product(M, N, K, BLOCK_SIZE, DTYPES, SEEDS), ) @torch.inference_mode() def test_w8a8_block_int8_matmul(M, N, K, block_size, out_dtype, seed): torch.manual_seed(seed) factor_for_scale = 1e-2 int8_info = torch.iinfo(torch.int8) int8_max, int8_min = int8_info.max, int8_info.min A_fp32 = (torch.rand(M, K, dtype=torch.float32) - 0.5) * 2 * int8_max A_fp8 = A_fp32.clamp(min=int8_min, max=int8_max).to(torch.float8_e4m3fn) B_fp32 = (torch.rand(N, K, dtype=torch.float32) - 0.5) * 2 * int8_max B_fp8 = B_fp32.clamp(min=int8_min, max=int8_max).to(torch.float8_e4m3fn) block_n, block_k = block_size[0], block_size[1] n_tiles = (N + block_n - 1) // block_n k_tiles = (K + block_k - 1) // block_k As = torch.rand(M, k_tiles, dtype=torch.float32) * factor_for_scale Bs = torch.rand(n_tiles, k_tiles, dtype=torch.float32) * factor_for_scale ref_out = native_w8a8_block_matmul(A_fp8, B_fp8, As, Bs, block_size, out_dtype) out = 
w8a8_block_int8_matmul(A_fp8, B_fp8, As, Bs, block_size, out_dtype) rel_diff = torch.mean( torch.abs(out.to(torch.float32) - ref_out.to(torch.float32)) ) / torch.mean(torch.abs(ref_out.to(torch.float32))) assert rel_diff < 0.001
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/quantization/test_cutlass_w4a8.py
tests/kernels/quantization/test_cutlass_w4a8.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """Tests for the CUTLASS W4A8 kernel. Run `pytest tests/kernels/quantization/test_cutlass_w4a8.py`. """ from dataclasses import dataclass import pytest import torch from vllm import _custom_ops as ops from vllm.model_executor.layers.quantization.utils.quant_utils import ( convert_packed_uint4b8_to_signed_int4_inplace, pack_cols, pack_rows, quantize_weights, unpack_quantized_values_into_int32, ) from vllm.platforms import current_platform from vllm.scalar_type import ScalarType, scalar_types if not current_platform.is_cuda(): pytest.skip("These tests use CUTLASS which requires CUDA", allow_module_level=True) # TODO: in future PR refactor this and `is_quant_method_supported` in the kernel # unit tests to a common utility function. Currently the use of # `is_quant_method_supported` conflates kernels with quantization methods # an assumption which is breaking down as quantizations methods can have # have kernels and some kernels support multiple quantization methods. 
IS_SUPPORTED_BY_GPU = current_platform.get_device_capability()[0] >= 9 MNK_SHAPES = [ (1, 128, 128), (1, 512, 1024), (1, 4096, 4096), (1, 8192, 28672), (13, 8192, 4096), (26, 4096, 8192), (64, 4096, 4096), (64, 8192, 28672), (257, 128, 4096), (257, 4096, 4096), (1024, 4096, 8192), (1024, 8192, 4096), ] # TODO(czhu): get supported schedules from fn SCHEDULES = [ "128x16_1x1x1", "256x16_1x1x1", "128x32_1x1x1", "256x32_1x1x1", "128x64_1x1x1", "256x64_1x1x1", "128x128_1x1x1", "256x128_1x1x1", "128x256_1x1x1", "128x256_2x1x1", ] @dataclass class TypeConfig: act_type: torch.dtype weight_type: ScalarType output_type: torch.dtype | None group_scale_type: torch.dtype | None channel_scale_type: torch.dtype | None token_scale_type: torch.dtype | None @dataclass class Tensors: w_ref: torch.Tensor a_ref: torch.Tensor a: torch.Tensor w_q: torch.Tensor w_g_s: torch.Tensor w_ch_s: torch.Tensor w_tok_s: torch.Tensor # (Act Type, Weight Type, Output Type, Scale Type, ZeroPoints, # Ch Scales Type, Tok Scales Type) TestTypeTuple = tuple[ list[torch.dtype], ScalarType, torch.dtype | None, torch.dtype | None, bool ] TEST_TYPES = [ *( TypeConfig( act_type=torch.float8_e4m3fn, weight_type=w_type, output_type=o_type, group_scale_type=torch.float8_e4m3fn, channel_scale_type=torch.float32, token_scale_type=torch.float32, ) for w_type in [scalar_types.int4] # TODO(czhu): fp16 out type for o_type in [torch.bfloat16] ), ] # TODO: in future PR refactor this and `is_quant_method_supported` in the kernel # unit tests to a common utility function. Currently the use of # `is_quant_method_supported` conflates kernels with quantization methods # an assumption which is breaking down as quantizations methods can have # have kernels and some kernels support multiple quantization methods. 
IS_SUPPORTED_BY_GPU = current_platform.has_device_capability(90) # For testing quantized linear kernels def to_fp8(tensor: torch.Tensor): finfo = torch.finfo(torch.float8_e4m3fn) return tensor.clamp(min=finfo.min, max=finfo.max).to(dtype=torch.float8_e4m3fn) def cutlass_quantize_and_pack( atype: torch.dtype, w: torch.Tensor, wtype: ScalarType, stype: torch.dtype | None, group_size: int | None, zero_points: bool = False, ): assert wtype.is_integer(), "TODO: support floating point weights" w_ref, w_q, w_s, w_zp = quantize_weights( w, wtype, group_size=group_size, zero_points=zero_points ) # since scales are cast to fp8, we need to compute w_ref this way w_ref = ( (w_q).to(torch.float32) * w_s.to(atype).to(torch.float32).repeat_interleave(group_size, dim=0) ).to(atype) # bit mask prevents sign extending int4 when packing w_q = pack_rows(w_q & 0x0F, wtype.size_bits, *w_q.shape) w_q = w_q.t().contiguous().t() # convert to col major w_q_packed = ops.cutlass_encode_and_reorder_int4b(w_q) w_s_packed = ops.cutlass_pack_scale_fp8(w_s.to(atype)) return w_ref, w_q_packed, w_s_packed, w_zp def create_test_tensors( shape: tuple[int, int, int], types: TypeConfig, group_size: int | None ) -> Tensors: m, n, k = shape print( "create_test_tensors, shape:", shape, "types:", types, "group_size:", group_size ) a = to_fp8(torch.randn((m, k), device="cuda")) w = to_fp8(torch.randn((k, n), device="cuda")) if types.group_scale_type is not None: w = w.to(types.group_scale_type) if w.dtype.itemsize == 1: w = w.to(torch.float16) w_ref, w_q_packed, w_s, _ = cutlass_quantize_and_pack( a.dtype, w, types.weight_type, types.group_scale_type, group_size, False ) a_ref = a.to(torch.float32) w_ref = w_ref.to(torch.float32) # for the practical use case we need per-tok scales for fp8 activations w_tok_s = torch.randn((m,), device="cuda", dtype=types.token_scale_type) w_ch_s = torch.randn((n,), device="cuda", dtype=types.channel_scale_type) return Tensors( w_ref=w_ref, a_ref=a_ref, a=a, w_q=w_q_packed, 
w_g_s=w_s, w_ch_s=w_ch_s, w_tok_s=w_tok_s, ) def mm_test_helper( types: TypeConfig, tensors: Tensors, group_size: int | None = None, schedule: str | None = None, ): # CUTLASS upstream uses fp8 with fastaccum as reference # https://github.com/NVIDIA/cutlass/blob/main/examples/55_hopper_mixed_dtype_gemm/55_hopper_int4_fp8_gemm.cu#L406 output_ref = torch._scaled_mm( tensors.a_ref.to(types.act_type), tensors.w_ref.to(types.act_type).t().contiguous().t(), # col major tensors.w_tok_s.unsqueeze(1), tensors.w_ch_s.unsqueeze(0), out_dtype=types.output_type, use_fast_accum=True, ) output = ops.cutlass_w4a8_mm( a=tensors.a, b_q=tensors.w_q, b_group_scales=tensors.w_g_s, b_group_size=group_size, b_channel_scales=tensors.w_ch_s, a_token_scales=tensors.w_tok_s, ) print(output) print(output_ref) torch.testing.assert_close( output, output_ref.to(output.dtype), rtol=1e-2, atol=1e-2 ) @pytest.mark.skipif( not IS_SUPPORTED_BY_GPU, reason="CUTLASS W4A8 is not supported on this GPU type." ) @pytest.mark.parametrize("shape", MNK_SHAPES, ids=lambda x: "x".join(str(v) for v in x)) @pytest.mark.parametrize("types", TEST_TYPES) @pytest.mark.parametrize("schedule", SCHEDULES) def test_cutlass_w4a8(shape, types: TypeConfig, schedule): group_sizes = [128] for group_size in group_sizes: tensors = create_test_tensors(shape, types, group_size) mm_test_helper(types, tensors, group_size, schedule) # Test to make sure cuda graphs work class W4A8Layer(torch.nn.Module): def __init__(self, **kwargs): super().__init__() self.kwargs = kwargs def forward(self, a): return ops.cutlass_w4a8_mm(a=a, **self.kwargs) @pytest.mark.skipif( not IS_SUPPORTED_BY_GPU, reason="CUTLASS W4A8 is not supported on this GPU type." 
) def test_w4a8_cuda_graph(): m, n, k = 512, 4096, 4096 a = to_fp8(torch.randn((m, k), device="cuda")) b = to_fp8(torch.randn((k, n), device="cuda")) wtype = scalar_types.int4 stype = torch.float8_e4m3fn group_size = 128 zero_points = False w_ref, w_q_packed, w_s, _ = cutlass_quantize_and_pack( a.dtype, b.to(torch.float16), wtype, stype, group_size, zero_points ) w_tok_s = torch.randn((m,), device="cuda", dtype=torch.float32) w_ch_s = torch.randn((n,), device="cuda", dtype=torch.float32) # Construct a trivial model with a single layer that calls the kernel model = W4A8Layer( b_q=w_q_packed, b_group_scales=w_s, b_group_size=group_size, b_channel_scales=w_ch_s, a_token_scales=w_tok_s, ) output_ref = torch._scaled_mm( a, w_ref.to(a.dtype).t().contiguous().t(), # col major w_tok_s.unsqueeze(1), w_ch_s.unsqueeze(0), out_dtype=torch.bfloat16, use_fast_accum=True, ) # Run the model with a cuda graph stream = torch.cuda.Stream() with torch.cuda.stream(stream): g = torch.cuda.CUDAGraph() with torch.cuda.graph(g): output = model(a) output.zero_() g.replay() torch.testing.assert_close(output, output_ref, rtol=1e-2, atol=1e-2) @pytest.mark.skipif( not IS_SUPPORTED_BY_GPU, reason="CUTLASS W4A8 is not supported on this GPU type." ) @pytest.mark.parametrize("shape", MNK_SHAPES) def test_convert_packed_uint4b8_to_signed_int4_inplace(shape): """ The W4A16 checkpoints encode the weights as int4b8 packed to int32. The CUTLASS kernels expect signed int4 packed to int32. This tests checks that the runtime int4b8 -> signed int4 conversion matches the offline conversion step exactly. 
""" _, N, K = shape # random weights packed to int32 t = torch.randint( low=torch.iinfo(torch.int32).min, high=torch.iinfo(torch.int32).max + 1, size=(N, K // 8), dtype=torch.int32, device="cuda", ) # compute reference unpacked = unpack_quantized_values_into_int32( t.clone(), scalar_types.uint4b8, packed_dim=1 ) unpacked = unpacked - 8 # int4b8 -> signed int4 ref = pack_cols(unpacked & 0x0F, 4, *unpacked.shape) out = convert_packed_uint4b8_to_signed_int4_inplace(t.clone()) assert torch.equal(ref, out) assert not torch.equal(ref, t)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/quantization/test_machete_mm.py
tests/kernels/quantization/test_machete_mm.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """Tests for the machete kernel. Run `pytest tests/kernels/quantization/test_machete_mm.py`. """ import math from dataclasses import dataclass, fields import pytest import torch from tests.kernels.utils import opcheck from vllm import _custom_ops as ops from vllm.model_executor.layers.quantization.utils.machete_utils import ( query_machete_supported_group_sizes, ) from vllm.model_executor.layers.quantization.utils.quant_utils import ( pack_rows, quantize_weights, ) from vllm.platforms import current_platform from vllm.scalar_type import ScalarType, scalar_types if current_platform.is_rocm(): pytest.skip( "These tests require machete_prepack_B, not supported on ROCm.", allow_module_level=True, ) CUDA_DEVICES = [f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2)] # TODO: in future PR refactor this and `is_quant_method_supported` in the kernel # unit tests to a common utility function. Currently the use of # `is_quant_method_supported` conflates kernels with quantization methods # an assumption which is breaking down as quantizations methods can have # have kernels and some kernels support multiple quantization methods. 
IS_SUPPORTED_BY_GPU = current_platform.get_device_capability()[0] >= 9 MNK_SHAPES = [ (1, 128, 128), (1, 8192, 28672), (13, 8192, 4096), (26, 4096, 8192), (64, 4096, 4096), (64, 8192, 28672), (257, 128, 4096), (257, 4224, 4160), (1024, 8192, 4096), ] @dataclass class TypeConfig: act_type: torch.dtype weight_type: ScalarType output_type: torch.dtype | None group_scale_type: torch.dtype | None group_zero_type: torch.dtype | None channel_scale_type: torch.dtype | None token_scale_type: torch.dtype | None @dataclass class Tensors: w_ref: torch.Tensor a_ref: torch.Tensor a: torch.Tensor w_q: torch.Tensor w_g_s: torch.Tensor | None w_g_zp: torch.Tensor | None w_ch_s: torch.Tensor | None w_tok_s: torch.Tensor | None # (Act Type, Weight Type, Output Type, Scale Type, ZeroPoints, # Ch Scales Type, Tok Scales Type) # NOTE: None "Scale Type" means the act type is floating point # None "Output Type" means the output type is the same as the act type TestTypeTuple = tuple[ list[torch.dtype], ScalarType, torch.dtype | None, torch.dtype | None, bool ] TEST_TYPES = [ # GPTQ style *( TypeConfig( act_type=a_type, weight_type=w_type, output_type=None, group_scale_type=a_type, group_zero_type=None, channel_scale_type=None, token_scale_type=None, ) for w_type in [scalar_types.uint4b8, scalar_types.uint8b128] for a_type in [torch.float16, torch.bfloat16] ), # AWQ style *( TypeConfig( act_type=a_type, weight_type=w_type, output_type=None, group_scale_type=a_type, group_zero_type=a_type, channel_scale_type=None, token_scale_type=None, ) for w_type in [scalar_types.uint4, scalar_types.uint8] for a_type in [torch.float16, torch.bfloat16] ), # # QQQ style # *(TypeConfig(act_type=torch.int8, # weight_type=scalar_types.uint4b8, # output_type=torch.float16, # group_scale_type=group_scale_type, # group_zero_type=None, # channel_scale_type=torch.float, # token_scale_type=torch.float) # for group_scale_type in [None, torch.float16]), # *(TypeConfig(act_type=torch.float8_e4m3fn, # 
weight_type=scalar_types.uint4b8, # output_type=torch.float16, # group_scale_type=group_scale_type, # group_zero_type=None, # channel_scale_type=torch.float, # token_scale_type=torch.float) # for group_scale_type in [None, torch.float16]), ] # TODO: in future PR refactor this and `is_quant_method_supported` in the kernel # unit tests to a common utility function. Currently the use of # `is_quant_method_supported` conflates kernels with quantization methods # an assumption which is breaking down as quantizations methods can have # have kernels and some kernels support multiple quantization methods. IS_SUPPORTED_BY_GPU = current_platform.has_device_capability(90) def rand_data(shape, dtype=torch.float16, scale=1, offset=0): if dtype.is_floating_point: return (scale * torch.rand(shape, device="cuda") - offset).to(dtype) else: return torch.randint(-8, 7, shape, dtype=dtype, device="cuda") def maybe_convert_zeropoints(zps: torch.Tensor | None, s: torch.Tensor): return zps if zps is None else -1 * s * (zps.to(s.dtype)) def group_size_valid(shape: tuple[int, int, int], group_size: int | None) -> bool: return group_size is None or group_size == -1 or shape[2] % group_size == 0 def machete_quantize_and_pack( atype: torch.dtype, w: torch.Tensor, wtype: ScalarType, stype: torch.dtype | None, group_size: int | None, zero_points: bool = False, ): assert wtype.is_integer(), "TODO: support floating point weights" w_ref, w_q, w_s, w_zp = quantize_weights( w, wtype, group_size=group_size, zero_points=zero_points, # to match how the kernel applies zps ref_zero_points_after_scales=True, ) w_q = pack_rows(w_q, wtype.size_bits, *w_q.shape) w_q = w_q.t().contiguous().t() # convert to col major w_q_machete = ops.machete_prepack_B(w_q, atype, wtype, stype) opcheck(torch.ops._C.machete_prepack_B, (w_q, atype, wtype.id, stype)) return w_ref, w_q_machete, w_s, w_zp def create_test_tensors( shape: tuple[int, int, int], types: TypeConfig, group_size: int | None, subset_stride_factor: int | 
None = None, ) -> Tensors: m, n, k = shape factor = subset_stride_factor or 1 print( "create_test_tensors, shape:", shape, "types:", types, "group_size:", group_size ) a = rand_data((m * factor, k * factor), types.act_type, scale=3, offset=2) w = rand_data((k * factor, n * factor), types.act_type, scale=3, offset=1) if factor > 1: a = a[0:m, 0:k] w = w[0:k, 0:n] if types.group_scale_type is not None: w = w.to(types.group_scale_type) if w.dtype.itemsize == 1: w = w.to(torch.float16) w_ref, w_q_packed, w_s, w_zp = machete_quantize_and_pack( a.dtype, w, types.weight_type, types.group_scale_type, group_size, types.group_zero_type is not None, ) if not a.dtype.is_floating_point: aiinfo = torch.iinfo(a.dtype) w_ref = w_ref.round().clamp(aiinfo.min, aiinfo.max) a_ref = a.to(torch.float32) w_ref = w_ref.to(torch.float32) w_ch_s = ( None if types.channel_scale_type is None else rand_data((n,), types.channel_scale_type) ) w_tok_s = ( None if types.token_scale_type is None else rand_data((m,), types.token_scale_type) ) return Tensors( w_ref=w_ref, a_ref=a_ref, a=a, w_q=w_q_packed, w_g_s=w_s, w_g_zp=maybe_convert_zeropoints(w_zp, w_s), w_ch_s=w_ch_s, w_tok_s=w_tok_s, ) # None stype means scales use the same dtype as a def machete_mm_test_helper( types: TypeConfig, tensors: Tensors, group_size: int | None = None, schedule: str | None = None, ): output_ref = torch.matmul(tensors.a_ref, tensors.w_ref) output_ref_type = output_ref.dtype if tensors.w_ch_s is not None: output_ref = ( output_ref.to(tensors.w_ch_s.dtype) * tensors.w_ch_s.unsqueeze(0) ).to(output_ref_type) if tensors.w_tok_s is not None: output_ref = ( output_ref.to(tensors.w_tok_s.dtype) * tensors.w_tok_s.unsqueeze(1) ).to(output_ref_type) output = ops.machete_mm( a=tensors.a, b_q=tensors.w_q, b_type=types.weight_type, b_group_scales=tensors.w_g_s, b_group_zeros=tensors.w_g_zp, b_group_size=group_size, b_channel_scales=tensors.w_ch_s, a_token_scales=tensors.w_tok_s, out_type=types.output_type, schedule=schedule, ) 
print(output) print(output_ref) # Relax atol as our reduction dim becomes larger (more rounding error) # Relax atol when we have zeropoints since the way machete applies # zeropoints (after scales) causes noise around 0 atol = ( 1 if tensors.w_g_zp is not None else min(5e-2 * math.sqrt(tensors.a.shape[1]), 1) ) rtol = 1e-1 if tensors.a.element_size() >= 2 else 2e-1 torch.testing.assert_close( output, output_ref.to(output.dtype), rtol=rtol, atol=atol ) @pytest.mark.skipif( not IS_SUPPORTED_BY_GPU, reason="Machete is not supported on this GPU type." ) @pytest.mark.parametrize("shape", MNK_SHAPES, ids=lambda x: "x".join(str(v) for v in x)) @pytest.mark.parametrize("types", TEST_TYPES) def test_machete_all_schedules(shape, types: TypeConfig): group_sizes: list[int | None] = [] if types.group_scale_type is None: group_sizes = [None] else: group_sizes = query_machete_supported_group_sizes(types.act_type) for group_size in group_sizes: if not group_size_valid(shape, group_size): continue tensors = create_test_tensors(shape, types, group_size) print(f"MNK = {shape}") for schedule in ops.machete_supported_schedules( types.act_type, types.weight_type, group_scales_type=types.group_scale_type, group_zeros_type=types.group_scale_type, out_type=types.output_type, ): print(f"Testing schedule {schedule}") machete_mm_test_helper(types, tensors, group_size, schedule) @pytest.mark.skipif( not IS_SUPPORTED_BY_GPU, reason="Machete is not supported on this GPU type." 
) @pytest.mark.parametrize("shape", MNK_SHAPES, ids=lambda x: "x".join(str(v) for v in x)) @pytest.mark.parametrize("types", TEST_TYPES) def test_machete_heuristic(shape, types: TypeConfig): group_sizes: list[int | None] = [] if types.group_scale_type is None: group_sizes = [None] else: group_sizes = query_machete_supported_group_sizes(types.act_type) for group_size in group_sizes: if not group_size_valid(shape, group_size): continue tensors = create_test_tensors(shape, types, group_size) machete_mm_test_helper(types, tensors, group_size) # Test working on other devices @pytest.mark.skipif( not IS_SUPPORTED_BY_GPU, reason="Machete is not supported on this GPU type." ) @pytest.mark.parametrize("device", CUDA_DEVICES) def test_machete_devices(device: str): group_size = 128 type_config = TypeConfig( act_type=torch.float16, weight_type=scalar_types.uint4b8, output_type=None, group_scale_type=torch.float16, group_zero_type=None, channel_scale_type=None, token_scale_type=None, ) tensors = create_test_tensors((512, 4096, 4096), type_config, group_size) for field in fields(Tensors): tensor = getattr(tensors, field.name) if isinstance(tensor, torch.Tensor): setattr(tensors, field.name, tensor.to(device)) machete_mm_test_helper(type_config, tensors, group_size) # Test working with a subset of A and B @pytest.mark.skipif( not IS_SUPPORTED_BY_GPU, reason="Machete is not supported on this GPU type." 
) def test_machete_subset(): group_size = 128 type_config = TypeConfig( act_type=torch.float16, weight_type=scalar_types.uint4b8, output_type=None, group_scale_type=torch.float16, group_zero_type=None, channel_scale_type=None, token_scale_type=None, ) tensors = create_test_tensors( (512, 4096, 4096), type_config, group_size, subset_stride_factor=2 ) machete_mm_test_helper(type_config, tensors, group_size) # Test to make sure cuda graphs work class MacheteLayer(torch.nn.Module): def __init__(self, **kwargs): super().__init__() self.kwargs = kwargs def forward(self, a): return ops.machete_mm(a=a, **self.kwargs) @pytest.mark.skipif( not IS_SUPPORTED_BY_GPU, reason="Machete is not supported on this GPU type." ) def test_machete_cuda_graph(): m, n, k = 512, 4096, 4096 a = rand_data((m, k), torch.float16) b = rand_data((k, n), torch.float16) wtype = scalar_types.uint4b8 stype = torch.float16 group_size = 128 zero_points = False w_ref, w_q_packed, w_s, w_zp = machete_quantize_and_pack( a.dtype, b, wtype, stype, group_size, zero_points ) # Construct a trivial model with a single layer that calls a machete kernel model = MacheteLayer( b_q=w_q_packed, b_type=wtype, b_group_scales=w_s, b_group_zeros=maybe_convert_zeropoints(w_zp, w_s), b_group_size=group_size, ) output_ref = torch.matmul(a, w_ref) # Run the model with a cuda graph stream = torch.cuda.Stream() with torch.cuda.stream(stream): g = torch.cuda.CUDAGraph() with torch.cuda.graph(g): output = model(a) output.zero_() g.replay() # Relax atol as our reduction dim becomes larger (more rounding error) # Relax atol when we have zeropoints since the way machete applies # zeropoints (after scales) causes noise around 0 atol = 1 if zero_points else min(5e-2 * math.sqrt(k), 1) torch.testing.assert_close(output, output_ref, rtol=1e-1, atol=atol)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/quantization/test_mxfp4_qutlass.py
tests/kernels/quantization/test_mxfp4_qutlass.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project # # Copyright (C) 2025 Roberto L. Castro (Roberto.LopezCastro@ist.ac.at). # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import numpy as np import pytest import torch from compressed_tensors.transform.utils.hadamard import deterministic_hadamard_matrix from vllm._custom_ops import fusedQuantizeMx, matmul_mxf4_bf16_tn from vllm.model_executor.layers.quantization.qutlass_utils import to_blocked from vllm.platforms import current_platform if not torch.cuda.is_available(): pytest.skip("CUDA required for these tests.", allow_module_level=True) if not ( current_platform.has_device_capability(100) or current_platform.has_device_capability(120) ): pytest.skip( reason="Tests require compute capability 10.0 (100) or 12.0 (120).", allow_module_level=True, ) # ----- Helpers ----- def get_hadamard_matrix(group_size: int, dtype: torch.dtype, device: torch.device): return ( deterministic_hadamard_matrix(group_size, dtype=dtype, device=device) * group_size**-0.5 ) def _rtne_fp4(x: torch.Tensor): device = x.device grid = torch.tensor( [ -6.0, -4.0, -3.0, -2.0, -1.5, -1.0, -0.5, -0.0, 0.0, 0.5, 1.0, 1.5, 2.0, 3.0, 4.0, 6.0, ], dtype=x.dtype, device=x.device, ) grid_int = torch.tensor( [-1, -2, -3, -4, -5, -6, -7, -8, 0, 1, 2, 3, 4, 5, 6, 7], dtype=torch.uint8, device=device, ) inds = torch.bucketize(x, grid) lo, hi = (inds - 1).clamp(min=0, max=15), inds.clamp(min=0, 
max=15) g_lo, g_hi = grid[lo], grid[hi] pick_hi = (g_hi - x < x - g_lo) | (g_hi - x == x - g_lo) & (grid_int[hi] % 2 == 0) y = torch.where(pick_hi, g_hi, g_lo) y_int = torch.where(pick_hi, grid_int[hi], grid_int[lo]) y_int_packed = (y_int[..., 1::2] & 0xF) << 4 | y_int[..., ::2] & 0xF return y, y_int_packed def _dq_fp4(x_e2m1: torch.Tensor, x_e8m0: torch.Tensor, alpha: float): device = x_e2m1.device x_e2m1_i32 = x_e2m1.view(dtype=torch.uint8).to(dtype=torch.int32) x_e2m1_unpacked = torch.stack( [x_e2m1_i32 & 0xF, (x_e2m1_i32 >> 4) & 0xF], dim=-1 ).flatten(start_dim=-2) grid_dq = torch.tensor( [ 0.0, 0.5, 1.0, 1.5, 2.0, 3.0, 4.0, 6.0, -0.0, -0.5, -1.0, -1.5, -2.0, -3.0, -4.0, -6.0, ], dtype=torch.float64, device=device, ) x_fp4_dq = grid_dq[x_e2m1_unpacked] scales_dq = x_e8m0.to(torch.float64) x_dq = (x_fp4_dq.unflatten(dim=-1, sizes=(-1, 32)) * scales_dq[..., None]).flatten( start_dim=-2 ) / alpha return x_dq, x_fp4_dq, scales_dq def _unpack_mask(clip_mask: torch.Tensor) -> torch.Tensor: clip_mask_unpacked_dq = torch.zeros( *clip_mask.shape[:-1], clip_mask.size(-1) * 8, dtype=torch.bool, device=clip_mask.device, ) for i in range(8): clip_mask_unpacked_dq[..., i::8] = (clip_mask >> i) & 1 return clip_mask_unpacked_dq def _forward_quantize_ref( x: torch.Tensor, h: torch.Tensor, rot_size: int, quest: bool = True ): device = x.device xh_ref64 = ( x.unflatten(dim=-1, sizes=(-1, rot_size)).to(dtype=torch.float64) @ h.reshape(rot_size, rot_size).to(dtype=torch.float64) ).flatten(start_dim=-2) if quest: scales_ref64_ = ( xh_ref64.unflatten(dim=-1, sizes=(-1, 32)).std(dim=-1, correction=0) * (2.92247856 / 6.0) + 1e-8 ) else: abs_max = xh_ref64.unflatten(dim=-1, sizes=(-1, 32)).abs().amax(dim=-1) scales_ref64_ = abs_max + 1e-8 xh_e8m0_ref = scales_ref64_.log2().floor().exp2().to(dtype=torch.float8_e8m0fnu) scales_ref64 = xh_e8m0_ref.to(dtype=torch.float64) xh_scaled_ref64 = ( xh_ref64.unflatten(dim=-1, sizes=(-1, 32)) / scales_ref64[..., None] ).flatten(start_dim=-2) if not 
quest: xh_scaled_ref64 *= 3 clip_mask_unpacked_ref = xh_scaled_ref64.abs() < 6.0 clip_mask_ref = torch.zeros( *x.shape[:-1], x.size(-1) // 8, dtype=torch.uint8, device=device ) for i in range(8): clip_mask_ref |= clip_mask_unpacked_ref[..., i::8].to(dtype=torch.uint8) << i xh_fp4_ref, xh_e2m1_ref = _rtne_fp4(xh_scaled_ref64) xh_dq, xh_fp4_dq, scales_dq = _dq_fp4( xh_e2m1_ref, xh_e8m0_ref, alpha=1.0 if quest else 3.0 ) clip_mask_unpacked_dq = _unpack_mask(clip_mask_ref) assert xh_fp4_dq.equal(xh_fp4_ref) assert scales_dq.equal(scales_ref64) assert clip_mask_unpacked_dq.equal(clip_mask_unpacked_ref) return ( xh_dq, clip_mask_unpacked_ref, (xh_e2m1_ref, xh_e8m0_ref, clip_mask_ref), ) DTYPE = torch.bfloat16 DEVICE = torch.device("cuda:0") ROT_SIZES = [32, 64, 128] SEEDS = [0] BATCHES = [1, 16] LLAMA_MODELS = { "7B": [(4096, 3 * 4096), (4096, 4096), (4096, 2 * 10752), (10752, 4096)], "13B": [(5120, 3 * 5120), (5120, 5120), (5120, 2 * 13568), (13568, 5120)], "33B": [(6656, 3 * 6656), (6656, 6656), (6656, 2 * 17664), (17664, 6656)], "70B": [(8192, 3 * 8192), (8192, 8192), (8192, 2 * 21760), (21760, 8192)], } @pytest.fixture(autouse=True) def _seed_each_test(): current_platform.seed_everything(0) np.random.seed(0) torch.random.manual_seed(0) @pytest.mark.parametrize("rot_size", ROT_SIZES) @torch.inference_mode() def test_fused_quantization_absmax(rot_size: int): dtype, device = DTYPE, DEVICE h = get_hadamard_matrix(rot_size, dtype, device) x = torch.randn(2, 4096, 4096, dtype=dtype, device=device) * 25.0 xh_dq_ref, _, _ = _forward_quantize_ref(x, h, rot_size, quest=False) xh_e2m1, xh_e8m0 = fusedQuantizeMx(x, h, method="abs_max") xh_e8m0 = xh_e8m0.reshape(2, 4096, 4096 // 32) xh_dq, *_ = _dq_fp4(xh_e2m1, xh_e8m0, alpha=3.0) torch.testing.assert_close(xh_dq, xh_dq_ref, rtol=0.34, atol=100) assert (xh_dq != xh_dq_ref).float().mean() <= 1e-4 m, n, k = 1, 504, 4096 a = torch.randn(m, k, dtype=dtype, device=device) * 25.0 b = torch.randn(n, k, dtype=dtype, device=device) * 25.0 
a_e2m1, a_e8m0 = fusedQuantizeMx(a, h, method="abs_max") b_e2m1, b_e8m0 = fusedQuantizeMx(b, h, method="abs_max") a_dq, *_ = _dq_fp4(a_e2m1, a_e8m0[:m, :k], alpha=1.0) b_dq, *_ = _dq_fp4(b_e2m1, b_e8m0[:n, :k], alpha=1.0) out_ref = a_dq @ b_dq.transpose(-2, -1) a_scale_block = to_blocked(a_e8m0, backend="triton") b_scale_block = to_blocked(b_e8m0, backend="triton") alpha = torch.tensor([1.0], device=device) out = matmul_mxf4_bf16_tn(a_e2m1, b_e2m1, a_scale_block, b_scale_block, alpha) assert out.equal(out_ref.to(dtype=out.dtype)) @pytest.mark.parametrize("rot_size", ROT_SIZES) @torch.inference_mode() def test_fused_quantization_quest(rot_size: int): dtype, device = DTYPE, DEVICE h = get_hadamard_matrix(rot_size, dtype, device) x = torch.randn(2, 4096, 4096, dtype=dtype, device=device) * 25.0 xh_dq_ref, _, _ = _forward_quantize_ref(x, h, rot_size, quest=True) xh_e2m1, xh_e8m0 = fusedQuantizeMx(x, h, method="quest") xh_e8m0 = xh_e8m0.reshape(2, 4096, 4096 // 32) xh_dq, *_ = _dq_fp4(xh_e2m1, xh_e8m0, alpha=1.0) torch.testing.assert_close(xh_dq, xh_dq_ref, rtol=0.34, atol=100) assert (xh_dq != xh_dq_ref).float().mean() <= 1e-4 m, n, k = 504, 504, 2048 a = torch.randn(m, k, dtype=dtype, device=device) * 25.0 b = torch.randn(n, k, dtype=dtype, device=device) * 25.0 a_e2m1, a_e8m0 = fusedQuantizeMx(a, h, method="quest") b_e2m1, b_e8m0 = fusedQuantizeMx(b, h, method="quest") a_dq, *_ = _dq_fp4(a_e2m1, a_e8m0[:m, :k], alpha=1.0) b_dq, *_ = _dq_fp4(b_e2m1, b_e8m0[:n, :k], alpha=1.0) out_ref = a_dq @ b_dq.transpose(-2, -1) a_scale_block = to_blocked(a_e8m0, backend="triton") b_scale_block = to_blocked(b_e8m0, backend="triton") alpha = torch.tensor([1.0], device=device) out = matmul_mxf4_bf16_tn(a_e2m1, b_e2m1, a_scale_block, b_scale_block, alpha) assert out.equal(out_ref.to(dtype=out.dtype)) @pytest.mark.parametrize("model", list(LLAMA_MODELS.keys())) @pytest.mark.parametrize("layer_idx", [0, 1, 2, 3]) @pytest.mark.parametrize("batch", [1, 16]) 
@pytest.mark.parametrize("had_size", ROT_SIZES) @torch.inference_mode() def test_llama_shapes(model: str, layer_idx: int, batch: int, had_size: int): dtype, device = DTYPE, DEVICE m = batch k, n = LLAMA_MODELS[model][layer_idx] h = get_hadamard_matrix(had_size, dtype, device) a = torch.rand(m, k, dtype=dtype, device=device) * 25.0 b = torch.rand(n, k, dtype=dtype, device=device) * 25.0 a_e2m1, a_e8m0 = fusedQuantizeMx(a, h, method="quest") b_e2m1, b_e8m0 = fusedQuantizeMx(b, h, method="quest") a_dq, *_ = _dq_fp4(a_e2m1, a_e8m0[:m, :k], alpha=1.0) b_dq, *_ = _dq_fp4(b_e2m1, b_e8m0[:n, :k], alpha=1.0) out_ref = a_dq @ b_dq.transpose(-2, -1) a_scale_block = to_blocked(a_e8m0, backend="triton") b_scale_block = to_blocked(b_e8m0, backend="triton") alpha = torch.tensor([1.0], device=device) out = matmul_mxf4_bf16_tn(a_e2m1, b_e2m1, a_scale_block, b_scale_block, alpha) assert out.equal(out_ref.to(dtype=out.dtype))
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/quantization/test_cutlass_w4a8_moe.py
tests/kernels/quantization/test_cutlass_w4a8_moe.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ Tests for the CUTLASS-based W4A8 grouped GEMM kernel and the full MoE layer. """ import random from dataclasses import dataclass import pytest import torch from vllm import _custom_ops as ops from vllm.model_executor.layers.quantization.utils.quant_utils import ( pack_rows, quantize_weights, ) from vllm.platforms import current_platform from vllm.scalar_type import ScalarType, scalar_types IS_SUPPORTED_BY_GPU = ( current_platform.is_cuda() and current_platform.get_device_capability()[0] >= 9 ) def to_fp8(tensor: torch.Tensor) -> torch.Tensor: finfo = torch.finfo(torch.float8_e4m3fn) return tensor.clamp(min=finfo.min, max=finfo.max).to(dtype=torch.float8_e4m3fn) def cutlass_quantize( atype: torch.dtype, w: torch.Tensor, wtype: ScalarType, stype: torch.dtype | None, group_size: int | None, zero_points: bool = False, ): """ Quantize weights into W4 and compute reference dequantized weights. Encoding/reordering of weights and packing of scales is deferred until after all experts are combined. """ assert wtype.is_integer(), "TODO: support floating point weights" w_ref, w_q, w_s, w_zp = quantize_weights( w, wtype, group_size=group_size, zero_points=zero_points ) # Since scales are later cast to fp8, recompute w_ref in atype here. w_ref = ( w_q.to(torch.float32) * w_s.to(atype).to(torch.float32).repeat_interleave(group_size, dim=0) ).to(atype) # Bit mask prevents sign extension of int4 when packing. w_q = pack_rows(w_q & 0x0F, wtype.size_bits, *w_q.shape) # Make weights row-major (N, K). w_q = w_q.t().contiguous() return w_ref, w_q, w_s.to(atype), w_zp def cutlass_preprocess( w_q_experts: list[torch.Tensor], w_s_experts: list[torch.Tensor] ): """ Reorder/encode expert weights and pack scales. Returns: w_q_packed: Packed/encoded int4 weights for all experts. w_s_packed: Packed fp8 scales for all experts. packed_layout: Layout/stride metadata for grouped GEMM. 
""" w_s_packed = ops.cutlass_pack_scale_fp8(torch.stack(w_s_experts)) w_q_packed, packed_layout = ops.cutlass_encode_and_reorder_int4b_grouped( torch.stack(w_q_experts) ) # expects dim 3 return w_q_packed, w_s_packed, packed_layout GROUP_SIZE = 128 # (num_experts, N, K) TEST_SHAPES = [ (8, 512, 2048), (8, 2048, 2048), (64, 512, 1024), (64, 2048, 2048), (4, 2048, 768), (8, 768, 2048), (64, 1536, 2048), (128, 8192, 4096), # test overflow int32 ] ALIGNMENT = 16 # torch._scaled_mm alignment for M, needed for reference check @dataclass class MoETestSetup: num_experts: int K: int N: int Ms: list[int] M_full: int a: torch.Tensor a_ref: torch.Tensor a_strides: torch.Tensor out: torch.Tensor c_strides: torch.Tensor per_tok_scales: torch.Tensor per_chan_scales: torch.Tensor w_refs: list[torch.Tensor] w_q_packed: torch.Tensor w_s_packed: torch.Tensor problem_sizes: torch.Tensor expert_offsets: torch.Tensor b_strides: torch.Tensor group_scale_strides: torch.Tensor def make_moe_test_setup( num_experts: int, K: int, N: int, *, alignment: int = ALIGNMENT, max_blocks: int = 64, device: str = "cuda", random_zero: bool = False, ) -> MoETestSetup: """Create a full set of tensors for testing cutlass_w4a8_moe_mm.""" assert K % GROUP_SIZE == 0 # Token counts per expert (multiples of `alignment`). Ms = [alignment * random.randint(1, max_blocks) for _ in range(num_experts)] # set random experts to 0 tokens if random_zero and num_experts > 1: num_zero = max(1, num_experts // 8) zero_indices = random.sample(range(num_experts), k=num_zero) for idx in zero_indices: Ms[idx] = 0 M_full = sum(Ms) assert M_full > 0 # Activations. a = to_fp8(torch.randn((M_full, K), device=device)) a_ref = a.to(torch.float32) a_strides = torch.full((num_experts,), K, dtype=torch.int64, device=device) # Output buffer. out = torch.empty((M_full, N), dtype=torch.bfloat16, device=device) c_strides = torch.full((num_experts,), N, dtype=torch.int64, device=device) # Channel/token scales. 
per_tok_scales = torch.randn((M_full, 1), dtype=torch.float32, device=device) per_chan_scales = torch.randn( (num_experts, N, 1), dtype=torch.float32, device=device ) # Expert weights and scales. wtype = scalar_types.int4 atype = stype = torch.float8_e4m3fn w_refs, w_qs, w_ss = [], [], [] for _ in range(num_experts): b = to_fp8(torch.randn((K, N), device=device)) w_ref, w_q, w_s, _ = cutlass_quantize( atype, b.to(torch.float16), wtype, stype, GROUP_SIZE, zero_points=False ) w_refs.append(w_ref) w_qs.append(w_q) w_ss.append(w_s) w_q_packed, w_s_packed, packed_layout = cutlass_preprocess(w_qs, w_ss) problem_sizes = torch.tensor( [[N, M, K] for M in Ms], dtype=torch.int32, device=device ) expert_offsets = torch.cat( [ torch.tensor([0], dtype=torch.int64), torch.cumsum(torch.tensor(Ms, dtype=torch.int64), dim=0)[:-1], ] ).to(device=device) # B strides and group scale strides. b_strides = packed_layout group_scale_strides = torch.zeros( (num_experts, 2), dtype=torch.int64, device=device ) group_scale_strides[:, 0] = N return MoETestSetup( num_experts=num_experts, K=K, N=N, Ms=Ms, M_full=M_full, a=a, a_ref=a_ref, a_strides=a_strides, out=out, c_strides=c_strides, per_tok_scales=per_tok_scales, per_chan_scales=per_chan_scales, w_refs=w_refs, w_q_packed=w_q_packed, w_s_packed=w_s_packed, problem_sizes=problem_sizes, expert_offsets=expert_offsets, b_strides=b_strides, group_scale_strides=group_scale_strides, ) def compute_moe_reference_output(setup: MoETestSetup) -> torch.Tensor: """Compute reference output using torch._scaled_mm per expert.""" out_ref = torch.empty_like(setup.out) ends = torch.cumsum(torch.tensor(setup.Ms), 0).tolist() starts = setup.expert_offsets.cpu().tolist() for i in range(setup.num_experts): start, end = starts[i], ends[i] if start == end: continue out_ref_i = torch._scaled_mm( setup.a_ref[start:end].to(torch.float8_e4m3fn), setup.w_refs[i].to(torch.float8_e4m3fn).t().contiguous().t(), setup.per_tok_scales[start:end], # (M, 1) 
setup.per_chan_scales[i].reshape(1, -1), # (1, N) out_dtype=torch.bfloat16, use_fast_accum=True, ) out_ref[start:end] = out_ref_i return out_ref @pytest.mark.skipif( not IS_SUPPORTED_BY_GPU, reason="W4A8 Grouped GEMM is not supported on this GPU type.", ) @pytest.mark.parametrize("shape", TEST_SHAPES) @pytest.mark.parametrize("random_zero", [True, False]) def test_cutlass_w4a8_moe_mm_end_to_end(shape, random_zero): num_experts, N, K = shape current_platform.seed_everything(42) setup = make_moe_test_setup( num_experts=num_experts, K=K, N=N, max_blocks=64, random_zero=random_zero ) ops.cutlass_w4a8_moe_mm( setup.out, setup.a, setup.w_q_packed, setup.per_tok_scales, setup.per_chan_scales, setup.w_s_packed, GROUP_SIZE, setup.expert_offsets, setup.problem_sizes, setup.a_strides, setup.b_strides, setup.c_strides, setup.group_scale_strides, ) torch.cuda.synchronize() out_ref = compute_moe_reference_output(setup) torch.testing.assert_close(setup.out, out_ref, rtol=1e-2, atol=1e-2) class W4A8MoELayer(torch.nn.Module): """ Minimal wrapper module to test cuda graphs """ def __init__(self, setup: MoETestSetup): super().__init__() self.setup = setup def forward(self, a: torch.Tensor) -> torch.Tensor: s = self.setup ops.cutlass_w4a8_moe_mm( s.out, a, s.w_q_packed, s.per_tok_scales, s.per_chan_scales, s.w_s_packed, GROUP_SIZE, s.expert_offsets, s.problem_sizes, s.a_strides, s.b_strides, s.c_strides, s.group_scale_strides, ) return s.out @pytest.mark.skipif( not IS_SUPPORTED_BY_GPU, reason="W4A8 Grouped GEMM is not supported on this GPU type.", ) def test_cutlass_w4a8_moe_mm_cuda_graph(): current_platform.seed_everything(42) # Fixed config for CUDA graph test (single parameter point). num_experts = 8 K = 512 N = 2048 setup = make_moe_test_setup( num_experts=num_experts, K=K, N=N, max_blocks=32, ) # Construct model that calls the grouped GEMM kernel. model = W4A8MoELayer(setup) # Build reference output once. 
out_ref = compute_moe_reference_output(setup) # Capture and run the model in a CUDA graph. a_static = setup.a.clone() # static input tensor for graph replay stream = torch.cuda.Stream() with torch.cuda.stream(stream): g = torch.cuda.CUDAGraph() with torch.cuda.graph(g): out_static = model(a_static) out_static.zero_() g.replay() torch.testing.assert_close(out_static, out_ref, rtol=1e-2, atol=1e-2)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/mamba/test_causal_conv1d.py
tests/kernels/mamba/test_causal_conv1d.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import pytest import torch import torch.nn.functional as F from einops import rearrange from vllm.attention.backends.utils import PAD_SLOT_ID from vllm.model_executor.layers.mamba.ops.causal_conv1d import ( causal_conv1d_fn, causal_conv1d_update, ) from vllm.platforms import current_platform def causal_conv1d_ref( x: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor | None = None, initial_states: torch.Tensor | None = None, return_final_states: bool = False, final_states_out: torch.Tensor | None = None, activation: str | None = "silu", ): """ x: (batch, dim, seqlen) weight: (dim, width) bias: (dim,) initial_states: (batch, dim, width - 1) final_states_out: (batch, dim, width - 1) out: (batch, dim, seqlen) """ if activation not in [None, "silu", "swish"]: raise NotImplementedError("activation must be None, silu, or swish") dtype_in = x.dtype x = x.to(weight.dtype) seqlen = x.shape[-1] dim, width = weight.shape if initial_states is None: out = F.conv1d(x, weight.unsqueeze(1), bias, padding=width - 1, groups=dim) else: x = torch.cat([initial_states, x], dim=-1) out = F.conv1d(x, weight.unsqueeze(1), bias, padding=0, groups=dim) out = out[..., :seqlen] if return_final_states: final_states = F.pad(x, (width - 1 - x.shape[-1], 0)).to( dtype_in ) # (batch, dim, width - 1) if final_states_out is not None: final_states_out.copy_(final_states) else: final_states_out = final_states out = (out if activation is None else F.silu(out)).to(dtype=dtype_in) return (out, None) if not return_final_states else (out, final_states_out) def causal_conv1d_update_ref( x, conv_state, weight, bias=None, activation=None, cache_seqlens=None ): """ x: (batch, dim) or (batch, dim, seqlen) conv_state: (batch, dim, state_len), where state_len >= width - 1 weight: (dim, width) bias: (dim,) cache_seqlens: (batch,), dtype int32. If not None, the conv_state is treated as a circular buffer. 
The conv_state will be updated by copying x to the conv_state starting at the index @cache_seqlens % state_len before performing the convolution. out: (batch, dim) or (batch, dim, seqlen) """ if activation not in [None, "silu", "swish"]: raise NotImplementedError("activation must be None, silu, or swish") dtype_in = x.dtype unsqueeze = x.dim() == 2 if unsqueeze: x = x.unsqueeze(-1) batch, dim, seqlen = x.shape width = weight.shape[1] state_len = conv_state.shape[-1] assert conv_state.shape == (batch, dim, state_len) assert weight.shape == (dim, width) if cache_seqlens is None: x_new = torch.cat([conv_state, x], dim=-1).to( weight.dtype ) # (batch, dim, state_len + seqlen) conv_state.copy_(x_new[:, :, -state_len:]) else: width_idx = torch.arange( -(width - 1), 0, dtype=torch.long, device=x.device ).unsqueeze(0) + cache_seqlens.unsqueeze(1) width_idx = ( torch.remainder(width_idx, state_len).unsqueeze(1).expand(-1, dim, -1) ) x_new = torch.cat([conv_state.gather(2, width_idx), x], dim=-1).to(weight.dtype) copy_idx = torch.arange(seqlen, dtype=torch.long, device=x.device).unsqueeze( 0 ) + cache_seqlens.unsqueeze(1) copy_idx = torch.remainder(copy_idx, state_len).unsqueeze(1).expand(-1, dim, -1) conv_state.scatter_(2, copy_idx, x) out = F.conv1d(x_new, weight.unsqueeze(1), bias, padding=0, groups=dim)[ :, :, -seqlen: ] if unsqueeze: out = out.squeeze(-1) return (out if activation is None else F.silu(out)).to(dtype=dtype_in) @pytest.mark.parametrize("itype", [torch.bfloat16, torch.float]) @pytest.mark.parametrize("silu_activation", [True]) @pytest.mark.parametrize("has_bias", [True]) def causal_conv1d_opcheck_fn( x: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor | None = None, cu_seq_len: torch.Tensor | None = None, cache_indices: torch.Tensor | None = None, has_initial_state: torch.Tensor | None = None, conv_states: torch.Tensor | None = None, activation: str | None = "silu", pad_slot_id: int = PAD_SLOT_ID, ): """ x: (batch, dim, seqlen) weight: (dim, width) 
bias: (dim,) seq_idx: (batch, seqlen) initial_states: (batch, dim, width - 1) final_states_out: (batch, dim, width - 1), to be written to activation: either None or "silu" or "swish" out: (batch, dim, seqlen) """ if activation not in [None, "silu", "swish"]: raise NotImplementedError("activation must be None, silu, or swish") if x.stride(-1) != 1: x = x.contiguous() bias = bias.contiguous() if bias is not None else None @pytest.mark.parametrize("itype", [torch.bfloat16]) @pytest.mark.parametrize("silu_activation", [False, True]) @pytest.mark.parametrize("has_bias", [False, True]) @pytest.mark.parametrize("seqlen", [1]) @pytest.mark.parametrize("width", [4]) @pytest.mark.parametrize("dim", [2048, 2048 + 16, 4096]) def test_causal_conv1d_update(dim, width, seqlen, has_bias, silu_activation, itype): device = "cuda" rtol, atol = (3e-4, 1e-3) if itype == torch.float32 else (3e-3, 5e-3) if itype == torch.bfloat16: rtol, atol = 1e-2, 5e-2 # set seed current_platform.seed_everything(0) batch = 2 x = torch.randn(batch, dim, seqlen, device=device, dtype=itype) x_ref = x.clone() conv_state = torch.randn(batch, dim, width - 1, device=device, dtype=itype) weight = torch.randn(dim, width, device=device, dtype=itype) bias = torch.randn(dim, device=device, dtype=itype) if has_bias else None conv_state_ref = conv_state.detach().clone() activation = None if not silu_activation else "silu" conv_state_indices = torch.arange(batch, dtype=torch.int32, device=device) out = causal_conv1d_update( x, conv_state, weight, bias, activation=activation, conv_state_indices=conv_state_indices, ) out_ref = causal_conv1d_update_ref( x_ref, conv_state_ref, weight, bias, activation=activation ) assert torch.equal(conv_state, conv_state_ref) assert torch.allclose(out, out_ref, rtol=rtol, atol=atol) @pytest.mark.parametrize("itype", [torch.float32, torch.bfloat16]) @pytest.mark.parametrize("silu_activation", [False, True]) @pytest.mark.parametrize("has_bias", [False, True]) 
@pytest.mark.parametrize("seqlen", [1, 3]) @pytest.mark.parametrize("width", [3, 4]) @pytest.mark.parametrize("dim", [2048 + 16, 4096]) # tests correctness in case subset of the sequences are padded @pytest.mark.parametrize("with_padding", [True, False]) @pytest.mark.parametrize("batch_size", [3]) def test_causal_conv1d_update_with_batch_gather( batch_size, with_padding, dim, width, seqlen, has_bias, silu_activation, itype ): device = "cuda" rtol, atol = (3e-4, 1e-3) if itype == torch.float32 else (3e-3, 5e-3) if itype == torch.bfloat16: rtol, atol = 1e-2, 5e-2 # set seed current_platform.seed_everything(0) padding = 5 if with_padding else 0 padded_batch_size = batch_size + padding # total_entries = number of cache line total_entries = 10 * batch_size # x will be (batch, dim, seqlen) with contiguous along dim-axis x = torch.randn( padded_batch_size, seqlen, dim, device=device, dtype=itype ).transpose(1, 2) x_ref = x.clone() conv_state_indices = torch.randperm(total_entries)[:batch_size].to( dtype=torch.int32, device=device ) unused_states_bool = torch.ones(total_entries, dtype=torch.bool, device=device) unused_states_bool[conv_state_indices] = False padded_state_indices = torch.concat( [ conv_state_indices, torch.as_tensor([PAD_SLOT_ID] * padding, dtype=torch.int32, device=device), ], dim=0, ) # conv_state will be (cache_lines, dim, state_len) # with contiguous along dim-axis conv_state = torch.randn( total_entries, width - 1, dim, device=device, dtype=itype ).transpose(1, 2) conv_state_for_padding_test = conv_state.clone() weight = torch.randn(dim, width, device=device, dtype=itype) bias = torch.randn(dim, device=device, dtype=itype) if has_bias else None conv_state_ref = conv_state[conv_state_indices, :].detach().clone() activation = None if not silu_activation else "silu" out = causal_conv1d_update( x, conv_state, weight, bias, activation=activation, conv_state_indices=padded_state_indices, pad_slot_id=PAD_SLOT_ID, ) out_ref = causal_conv1d_update_ref( 
x_ref[:batch_size], conv_state_ref, weight, bias, activation=activation ) assert torch.equal(conv_state[conv_state_indices, :], conv_state_ref) assert torch.equal( conv_state[unused_states_bool], conv_state_for_padding_test[unused_states_bool] ) assert torch.allclose(out[:batch_size], out_ref, rtol=rtol, atol=atol) @pytest.mark.parametrize("itype", [torch.bfloat16]) @pytest.mark.parametrize("silu_activation", [True]) @pytest.mark.parametrize("has_bias", [True]) @pytest.mark.parametrize("width", [4]) @pytest.mark.parametrize("seqlen", [8, 249, 4096]) @pytest.mark.parametrize("dim", [64, 4096]) @pytest.mark.parametrize("with_padding", [True, False]) @pytest.mark.parametrize("batch", [4, 10]) def test_causal_conv1d_varlen( batch, with_padding, dim, seqlen, width, has_bias, silu_activation, itype ): device = "cuda" torch.cuda.empty_cache() rtol, atol = (3e-4, 1e-3) if itype == torch.float32 else (3e-3, 5e-3) if itype == torch.bfloat16: rtol, atol = 1e-2, 5e-2 # set seed current_platform.seed_everything(0) seqlens = [] batch_size = batch padding = 3 if with_padding else 0 padded_batch_size = batch_size + padding nsplits = padded_batch_size - 1 eos_pos = torch.randperm(seqlen - 1)[:nsplits].sort().values seqlens.append( torch.diff( torch.cat([torch.tensor([-1]), eos_pos, torch.tensor([seqlen - 1])]) ).tolist() ) assert sum(seqlens[-1]) == seqlen assert all(s > 0 for s in seqlens[-1]) total_entries = batch_size * 10 cumsum = torch.cumsum(torch.tensor(seqlens[0]), dim=0).to(torch.int32) cumsum = torch.concat([torch.tensor([0], dtype=torch.int32), cumsum], dim=0) x = rearrange( torch.randn(1, seqlen, 4096 + dim + 64, device=device, dtype=itype), "b s d -> b d s", )[:, 4096 : 4096 + dim, :] weight = torch.randn(dim, width, device=device, dtype=itype) bias = torch.randn(dim, device=device, dtype=itype) if has_bias else None x_ref = x.clone() weight_ref = weight.clone() bias_ref = bias.clone() if bias is not None else None activation = None if not silu_activation else "silu" 
final_states = torch.randn( total_entries, width - 1, dim, device=x.device, dtype=x.dtype ).transpose(1, 2) final_states_ref = final_states.clone() has_initial_states = torch.randint( 0, 2, (cumsum.shape[0] - 1,), dtype=torch.bool, device=x.device ) state_indices = torch.randperm(total_entries, dtype=torch.int32, device=x.device)[ :batch_size ] padded_state_indices = torch.concat( [ state_indices, torch.as_tensor([PAD_SLOT_ID] * padding, dtype=torch.int32, device=device), ], dim=-1, ) out = causal_conv1d_fn( x.squeeze(0), weight, bias=bias, conv_states=final_states, query_start_loc=cumsum.cuda(), cache_indices=padded_state_indices, has_initial_state=has_initial_states, activation=activation, pad_slot_id=PAD_SLOT_ID, ) out_ref = [] out_ref_b = [] splits = [torch.split(var, seqlens[0], dim=-1) for var in (x_ref)] for i in range(len(seqlens[0])): x_s = [v[i].unsqueeze(0) for v in splits][0] if padded_state_indices[i] == PAD_SLOT_ID: continue out_ref_b.append( causal_conv1d_ref( x_s, weight_ref, bias_ref, activation=activation, return_final_states=True, final_states_out=final_states_ref[padded_state_indices[i]].unsqueeze(0), initial_states=final_states_ref[padded_state_indices[i]].unsqueeze(0) if has_initial_states[i] else None, ) ) out_ref.append(torch.cat([t[0] for t in out_ref_b], dim=2)) out_ref_tensor = torch.cat(out_ref, dim=0) assert torch.allclose( final_states[state_indices], final_states_ref[state_indices], rtol=rtol, atol=atol, ) unpadded_out = out[:, : out_ref_tensor.shape[-1]] assert torch.allclose(unpadded_out, out_ref_tensor, rtol=rtol, atol=atol)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/mamba/test_mamba_ssm_ssd.py
tests/kernels/mamba/test_mamba_ssm_ssd.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import pytest import torch import torch.nn.functional as F from einops import rearrange, repeat from vllm.model_executor.layers.mamba.ops.ssd_combined import ( mamba_chunk_scan_combined_varlen, ) from vllm.platforms import current_platform from vllm.v1.attention.backends.mamba2_attn import compute_varlen_chunk_metadata # Added by the IBM Team, 2024 # Adapted from https://github.com/state-spaces/mamba/blob/v2.2.4/mamba_ssm/modules/ssd_minimal.py # this is the segsum implementation taken from above def segsum(x): """Calculates segment sum.""" T = x.size(-1) x = repeat(x, "... d -> ... d e", e=T) mask = torch.tril(torch.ones(T, T, device=x.device, dtype=bool), diagonal=-1) x = x.masked_fill(~mask, 0) x_segsum = torch.cumsum(x, dim=-2) mask = torch.tril(torch.ones(T, T, device=x.device, dtype=bool), diagonal=0) x_segsum = x_segsum.masked_fill(~mask, -torch.inf) return x_segsum def ssd_minimal_discrete(X, A, B, C, block_len, initial_states=None): """ Arguments: X: (batch, length, n_heads, d_head) A: (batch, length, n_heads) B: (batch, length, n_heads, d_state) C: (batch, length, n_heads, d_state) Return: Y: (batch, length, n_heads, d_head) """ assert X.dtype == A.dtype == B.dtype == C.dtype assert X.shape[1] % block_len == 0 # Rearrange into blocks/chunks X, A, B, C = ( rearrange(x, "b (c l) ... -> b c l ...", l=block_len) for x in (X, A, B, C) ) A = rearrange(A, "b c l h -> b h c l") A_cumsum = torch.cumsum(A, dim=-1) # 1. Compute the output for each intra-chunk (diagonal blocks) L = torch.exp(segsum(A)) Y_diag = torch.einsum("bclhn,bcshn,bhcls,bcshp->bclhp", C, B, L, X) # 2. Compute the state for each intra-chunk # (right term of low-rank factorization of off-diagonal blocks; B terms) decay_states = torch.exp(A_cumsum[:, :, :, -1:] - A_cumsum) states = torch.einsum("bclhn,bhcl,bclhp->bchpn", B, decay_states, X) # 3. 
Compute the inter-chunk SSM recurrence; produces correct SSM states at # chunk boundaries # (middle term of factorization of off-diag blocks; A terms) if initial_states is None: initial_states = torch.zeros_like(states[:, :1]) states = torch.cat([initial_states, states], dim=1) decay_chunk = torch.exp(segsum(F.pad(A_cumsum[:, :, :, -1], (1, 0)))) new_states = torch.einsum("bhzc,bchpn->bzhpn", decay_chunk, states) states, final_state = new_states[:, :-1], new_states[:, -1] # 4. Compute state -> output conversion per chunk # (left term of low-rank factorization of off-diagonal blocks; C terms) state_decay_out = torch.exp(A_cumsum) Y_off = torch.einsum("bclhn,bchpn,bhcl->bclhp", C, states, state_decay_out) # Add output of intra-chunk and inter-chunk terms # (diagonal and off-diagonal blocks) Y = rearrange(Y_diag + Y_off, "b c l h p -> b (c l) h p") return Y, final_state def generate_random_inputs(batch_size, seqlen, n_heads, d_head, itype, device="cuda"): current_platform.seed_everything(0) A = -torch.exp(torch.rand(n_heads, dtype=itype, device=device)) dt = F.softplus( torch.randn(batch_size, seqlen, n_heads, dtype=itype, device=device) - 4 ) X = torch.randn((batch_size, seqlen, n_heads, d_head), dtype=itype, device=device) B = torch.randn((batch_size, seqlen, n_heads, d_head), dtype=itype, device=device) C = torch.randn((batch_size, seqlen, n_heads, d_head), dtype=itype, device=device) return A, dt, X, B, C def generate_continuous_batched_examples( example_lens_by_batch, num_examples, full_length, last_taken, exhausted, n_heads, d_head, itype, device="cuda", return_naive_ref=True, ): # this function generates a random examples of certain length # and then cut according to "example_lens_by_batch" and feed # them in continuous batches to the kernels. # If if return_naive_ref=True, the naive torch implementation # ssd_minimal_discrete will be used to compute and return # reference output. 
# generate the full-length example A, dt, X, B, C = generate_random_inputs( num_examples, full_length, n_heads, d_head, itype ) if return_naive_ref: Y_min, final_state_min = ssd_minimal_discrete( X * dt.unsqueeze(-1), A * dt, B, C, block_len=full_length // 4 ) # internal function that outputs a cont batch of examples # given a tuple of lengths for each example in the batch # e.g., example_lens=(8, 4) means take 8 samples from first eg, # 4 examples from second eg, etc def get_continuous_batch(example_lens: tuple[int, ...]): indices = [] for i, x in enumerate(example_lens): c = last_taken.get(i, 0) indices.append((c, c + x)) last_taken[i] = (c + x) % full_length exhausted[i] = last_taken[i] == 0 return ( torch.concat([x[i, s:e] for i, (s, e) in enumerate(indices)]).unsqueeze(0) for x in (dt, X, B, C) ) # internal function that maps "n" to the appropriate right boundary # value when forming continuous batches from examples of length given # by "full_length". # - e.g., when n > full_length, returns n % full_length # when n == full_length, returns full_length def end_boundary(n: int): return n - ((n - 1) // full_length) * full_length IND_E = None for spec in example_lens_by_batch: # get the (maybe partial) example seen in this cont batch dt2, X2, B2, C2 = get_continuous_batch(spec) # get the metadata cu_seqlens = torch.tensor((0,) + spec, device=device).cumsum(dim=0) seq_idx = torch.zeros( cu_seqlens[-1], dtype=torch.int32, device=cu_seqlens.device ) for i, (srt, end) in enumerate( zip( cu_seqlens, cu_seqlens[1:], ) ): seq_idx[srt:end] = i # for cont batch if IND_E is None: IND_S = [0 for _ in range(len(spec))] else: IND_S = [x % full_length for x in IND_E] IND_E = [end_boundary(x + y) for x, y in zip(IND_S, spec)] # varlen has implicit batch=1 dt2 = dt2.squeeze(0) X2 = X2.squeeze(0) B2 = B2.squeeze(0) C2 = C2.squeeze(0) yield ( [Y_min[s, IND_S[s] : IND_E[s]] for s in range(num_examples)] if return_naive_ref else None, cu_seqlens, seq_idx, (A, dt2, X2, B2, C2), ) 
@pytest.mark.parametrize("itype", [torch.float32, torch.bfloat16]) @pytest.mark.parametrize("n_heads", [4, 16, 32]) @pytest.mark.parametrize("d_head", [5, 8, 32, 128]) @pytest.mark.parametrize("seq_len_chunk_size", [(112, 16), (128, 32)]) def test_mamba_chunk_scan_single_example(d_head, n_heads, seq_len_chunk_size, itype): # this tests the kernels on a single example (bs=1) # TODO: the bfloat16 case requires higher thresholds. To be investigated if itype == torch.bfloat16: atol, rtol = 5e-2, 5e-2 else: atol, rtol = 8e-3, 5e-3 # set seed batch_size = 1 # batch_size # ssd_minimal_discrete requires chunk_size divide seqlen # - this is only required for generating the reference seqs, # it is not an operational limitation. seqlen, chunk_size = seq_len_chunk_size A, dt, X, B, C = generate_random_inputs(batch_size, seqlen, n_heads, d_head, itype) Y_min, final_state_min = ssd_minimal_discrete( X * dt.unsqueeze(-1), A * dt, B, C, chunk_size ) cu_seqlens = torch.tensor((0, seqlen), device="cuda").cumsum(dim=0) cu_chunk_seqlens, last_chunk_indices, seq_idx_chunks = ( compute_varlen_chunk_metadata(cu_seqlens, chunk_size) ) # varlen has implicit batch=1 X = X.squeeze(0) dt = dt.squeeze(0) A = A.squeeze(0) B = B.squeeze(0) C = C.squeeze(0) Y = torch.empty_like(X) final_state = mamba_chunk_scan_combined_varlen( X, dt, A, B, C, chunk_size, cu_seqlens=cu_seqlens.to(torch.int32), cu_chunk_seqlens=cu_chunk_seqlens, last_chunk_indices=last_chunk_indices, seq_idx=seq_idx_chunks, out=Y, D=None, ) # just test the last in sequence torch.testing.assert_close(Y[-1], Y_min[0, -1], atol=atol, rtol=rtol) # just test the last head # NOTE, in the kernel we always cast states to fp32 torch.testing.assert_close( final_state[:, -1].to(torch.float32), final_state_min[:, -1].to(torch.float32), atol=atol, rtol=rtol, ) @pytest.mark.parametrize("itype", [torch.float32]) @pytest.mark.parametrize("n_heads", [4, 8]) @pytest.mark.parametrize("d_head", [5, 16, 32]) @pytest.mark.parametrize( 
"seq_len_chunk_size_cases", [ # small-ish chunk_size (8) (64, 8, 2, [(64, 32), (64, 32)]), (64, 8, 2, [(8, 8), (8, 8), (8, 8)]), # chunk size boundary ( 64, 8, 2, [(4, 4), (4, 4), (4, 4), (4, 4)], ), # chunk_size larger than cont batches (64, 8, 5, [(64, 32, 16, 8, 8)]), # large-ish chunk_size (256) (64, 256, 1, [(5,), (1,), (1,), (1,)]), # irregular sizes with small sequences ( 64, 256, 2, [(5, 30), (1, 2), (1, 2), (1, 2)], ), # irregular sizes with small sequences # we also need to test some large seqlen # to catch errors with init states decay (768, 128, 2, [(138, 225), (138, 225)]), ], ) def test_mamba_chunk_scan_cont_batch(d_head, n_heads, seq_len_chunk_size_cases, itype): # this test with multiple examples in a continuous batch # (i.e. chunked prefill) seqlen, chunk_size, num_examples, cases = seq_len_chunk_size_cases # This test can have larger error for longer sequences if seqlen > 256: atol, rtol = 1e-2, 5e-3 else: atol, rtol = 5e-3, 5e-3 # hold state during the cutting process so we know if an # example has been exhausted and needs to cycle last_taken: dict = {} # map: eg -> pointer to last taken sample exhausted: dict = {} # map: eg -> boolean indicating example is exhausted states = None for Y_min, cu_seqlens, _token_seq_idx, ( A, dt, X, B, C, ) in generate_continuous_batched_examples( cases, num_examples, seqlen, last_taken, exhausted, n_heads, d_head, itype ): cu_chunk_seqlens, last_chunk_indices, seq_idx_chunks = ( compute_varlen_chunk_metadata(cu_seqlens, chunk_size) ) Y = torch.empty_like(X) new_states = mamba_chunk_scan_combined_varlen( X, dt, A, B, C, chunk_size, cu_seqlens=cu_seqlens.to(torch.int32), cu_chunk_seqlens=cu_chunk_seqlens, last_chunk_indices=last_chunk_indices, seq_idx=seq_idx_chunks, out=Y, D=None, initial_states=states, ) # just test the last in sequence for i in range(num_examples): # just test one dim and dstate Y_eg = Y[cu_seqlens[i] : cu_seqlens[i + 1], 0, 0] Y_min_eg = Y_min[i][:, 0, 0] torch.testing.assert_close(Y_eg, 
Y_min_eg, atol=atol, rtol=rtol) # update states states = new_states for i, clear in exhausted.items(): if clear: states[i].fill_(0.0) exhausted[i] = False @pytest.mark.parametrize("chunk_size", [8, 256]) @pytest.mark.parametrize( "seqlens", [(16, 20), (270, 88, 212, 203)], ) def test_mamba_chunk_scan_cont_batch_prefill_chunking(chunk_size, seqlens): # This test verifies the correctness of the chunked prefill implementation # in the mamba2 ssd kernels, by comparing concatenation (in the sequence # dimension) of chunked results with the full sequence result. # It is different from test_mamba_chunk_scan_cont_batch by: # 1. Not using the naive torch implementation (ssd_minimal_discrete) to get # reference outputs. Instead, it compares chunked kernel outputs to full # sequence kernel outputs. This is the most straightforward way to # assert chunked prefill correctness. # 2. It focuses on cases where sequences change in the middle of mamba # chunks, and not necessarily on chunk boundaries. max_seqlen = max(seqlens) # This test can have larger error for longer sequences if max_seqlen > 256: atol, rtol = 1e-2, 5e-3 else: atol, rtol = 5e-3, 5e-3 num_sequences = len(seqlens) n_heads = 16 d_head = 64 itype = torch.float32 # hold state during the cutting process so we know if an # example has been exhausted and needs to cycle last_taken: dict = {} # map: eg -> pointer to last taken sample exhausted: dict = {} # map: eg -> boolean indicating example is exhausted _, cu_seqlens, seq_idx, (A, dt, X, B, C) = next( generate_continuous_batched_examples( [seqlens], num_sequences, max_seqlen, last_taken, exhausted, n_heads, d_head, itype, return_naive_ref=False, ) ) seqlens = torch.tensor(seqlens, dtype=torch.int32, device=X.device) device = X.device ## full seqlen computation cu_chunk_seqlens, last_chunk_indices, seq_idx_chunks = ( compute_varlen_chunk_metadata(cu_seqlens, chunk_size) ) Y_ref = torch.empty_like(X) state_ref = mamba_chunk_scan_combined_varlen( X, dt, A, B, C, 
chunk_size, cu_seqlens=cu_seqlens.to(torch.int32), cu_chunk_seqlens=cu_chunk_seqlens, last_chunk_indices=last_chunk_indices, seq_idx=seq_idx_chunks, out=Y_ref, D=None, initial_states=None, ) ## chunked seqlen computation # first chunk chunked_seqlens = seqlens // 2 chunked_cu_seqlens = torch.cat( [torch.tensor([0], device=device), torch.cumsum(chunked_seqlens, dim=0)], dim=0 ) chunked_input_seq_len = chunked_cu_seqlens[-1] X_chunked = torch.zeros_like(X)[:chunked_input_seq_len, ...] dt_chunked = torch.zeros_like(dt)[:chunked_input_seq_len, ...] B_chunked = torch.zeros_like(B)[:chunked_input_seq_len, ...] C_chunked = torch.zeros_like(C)[:chunked_input_seq_len, ...] for i in range(num_sequences): chunk_f = lambda x, i: x[ cu_seqlens[i] : cu_seqlens[i] + chunked_seqlens[i], ... ] X_chunked[chunked_cu_seqlens[i] : chunked_cu_seqlens[i + 1], ...] = chunk_f( X, i ) dt_chunked[chunked_cu_seqlens[i] : chunked_cu_seqlens[i + 1], ...] = chunk_f( dt, i ) B_chunked[chunked_cu_seqlens[i] : chunked_cu_seqlens[i + 1], ...] = chunk_f( B, i ) C_chunked[chunked_cu_seqlens[i] : chunked_cu_seqlens[i + 1], ...] = chunk_f( C, i ) cu_chunk_seqlens, last_chunk_indices, seq_idx_chunks = ( compute_varlen_chunk_metadata(chunked_cu_seqlens, chunk_size) ) Y_partial = torch.empty_like(X_chunked) partial_state = mamba_chunk_scan_combined_varlen( X_chunked, dt_chunked, A, B_chunked, C_chunked, chunk_size, cu_seqlens=chunked_cu_seqlens.to(torch.int32), cu_chunk_seqlens=cu_chunk_seqlens, last_chunk_indices=last_chunk_indices, seq_idx=seq_idx_chunks, out=Y_partial, D=None, initial_states=None, ) # remaining chunk remaining_chunked_seqlens = seqlens - chunked_seqlens remaining_chunked_cu_seqlens = torch.cat( [ torch.tensor([0], device=device), torch.cumsum(remaining_chunked_seqlens, dim=0), ], dim=0, ) remaining_chunked_input_seq_len = remaining_chunked_cu_seqlens[-1] remaining_X_chunked = torch.zeros_like(X)[:remaining_chunked_input_seq_len, ...] 
remaining_dt_chunked = torch.zeros_like(dt)[:remaining_chunked_input_seq_len, ...] remaining_B_chunked = torch.zeros_like(B)[:remaining_chunked_input_seq_len, ...] remaining_C_chunked = torch.zeros_like(C)[:remaining_chunked_input_seq_len, ...] for i in range(num_sequences): remaining_chunk_f = lambda x, i: x[ cu_seqlens[i] + chunked_seqlens[i] : cu_seqlens[i + 1], ... ] remaining_X_chunked[ remaining_chunked_cu_seqlens[i] : remaining_chunked_cu_seqlens[i + 1], ... ] = remaining_chunk_f(X, i) remaining_dt_chunked[ remaining_chunked_cu_seqlens[i] : remaining_chunked_cu_seqlens[i + 1], ... ] = remaining_chunk_f(dt, i) remaining_B_chunked[ remaining_chunked_cu_seqlens[i] : remaining_chunked_cu_seqlens[i + 1], ... ] = remaining_chunk_f(B, i) remaining_C_chunked[ remaining_chunked_cu_seqlens[i] : remaining_chunked_cu_seqlens[i + 1], ... ] = remaining_chunk_f(C, i) # assert input chunking is correct concat_chunk_f = lambda pt1, pt2, i: torch.cat( [ pt1[chunked_cu_seqlens[i] : chunked_cu_seqlens[i + 1], ...], pt2[ remaining_chunked_cu_seqlens[i] : remaining_chunked_cu_seqlens[i + 1], ..., ], ], dim=0, ) concat_batch_f = lambda pt1, pt2: torch.cat( [concat_chunk_f(pt1, pt2, i) for i in range(num_sequences)], dim=0 ) assert concat_batch_f(X_chunked, remaining_X_chunked).equal(X) assert concat_batch_f(dt_chunked, remaining_dt_chunked).equal(dt) assert concat_batch_f(B_chunked, remaining_B_chunked).equal(B) assert concat_batch_f(C_chunked, remaining_C_chunked).equal(C) cu_chunk_seqlens, last_chunk_indices, seq_idx_chunks = ( compute_varlen_chunk_metadata(remaining_chunked_cu_seqlens, chunk_size) ) Y_chunked = torch.empty_like(remaining_X_chunked) state_chunked = mamba_chunk_scan_combined_varlen( remaining_X_chunked, remaining_dt_chunked, A, remaining_B_chunked, remaining_C_chunked, chunk_size, cu_seqlens=remaining_chunked_cu_seqlens.to(torch.int32), cu_chunk_seqlens=cu_chunk_seqlens, last_chunk_indices=last_chunk_indices, seq_idx=seq_idx_chunks, out=Y_chunked, D=None, 
initial_states=partial_state, ) Y = concat_batch_f(Y_partial, Y_chunked) # kernel chunked is same as kernel overall for i in range(num_sequences): Y_seq = Y[cu_seqlens[i] : cu_seqlens[i + 1], ...] Y_ref_seq = Y_ref[cu_seqlens[i] : cu_seqlens[i + 1], ...] torch.testing.assert_close( Y_seq[: chunked_seqlens[i], ...], Y_ref_seq[: chunked_seqlens[i], ...], atol=atol, rtol=rtol, msg=lambda x, i=i: f"seq{i} output part1 " + x, ) torch.testing.assert_close( Y_seq[chunked_seqlens[i] :, ...], Y_ref_seq[chunked_seqlens[i] :, ...], atol=atol, rtol=rtol, msg=lambda x, i=i: f"seq{i} output part2 " + x, ) state_seq = state_chunked[i] state_seq_ref = state_ref[i] torch.testing.assert_close( state_seq, state_seq_ref, atol=atol, rtol=rtol, msg=lambda x, i=i: f"seq{i} state " + x, )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/mamba/test_mamba_ssm.py
tests/kernels/mamba/test_mamba_ssm.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import pytest import torch import torch.nn.functional as F from einops import rearrange, repeat from tests.kernels.utils import opcheck from vllm import _custom_ops as ops # noqa: F401 from vllm.attention.backends.utils import PAD_SLOT_ID from vllm.model_executor.layers.mamba.ops.mamba_ssm import ( selective_scan_fn, selective_state_update, ) from vllm.platforms import current_platform def selective_state_update_ref( state, x, dt, A, B, C, D=None, z=None, dt_bias=None, dt_softplus=False ): """ Argument: state: (batch, dim, dstate) or (batch, nheads, dim, dstate) x: (batch, dim) or (batch, nheads, dim) dt: (batch, dim) or (batch, nheads, dim) A: (dim, dstate) or (nheads, dim, dstate) B: (batch, dstate) or (batch, ngroups, dstate) C: (batch, dstate) or (batch, ngroups, dstate) D: (dim,) or (nheads, dim) z: (batch, dim) or (batch, nheads, dim) dt_bias: (dim,) or (nheads, dim) Return: out: (batch, dim) or (batch, nheads, dim) """ has_heads = state.dim() > 3 if state.dim() == 3: state = state.unsqueeze(1) if x.dim() == 2: x = x.unsqueeze(1) if dt.dim() == 2: dt = dt.unsqueeze(1) if A.dim() == 2: A = A.unsqueeze(0) if B.dim() == 2: B = B.unsqueeze(1) if C.dim() == 2: C = C.unsqueeze(1) if D is not None and D.dim() == 1: D = D.unsqueeze(0) if z is not None and z.dim() == 2: z = z.unsqueeze(1) if dt_bias is not None and dt_bias.dim() == 1: dt_bias = dt_bias.unsqueeze(0) batch, nheads, dim, dstate = state.shape assert x.shape == (batch, nheads, dim) assert dt.shape == x.shape assert A.shape == (nheads, dim, dstate) ngroups = B.shape[1] assert nheads % ngroups == 0, "nheads must be divisible by ngroups" assert B.shape == (batch, ngroups, dstate) assert C.shape == B.shape if D is not None: assert D.shape == (nheads, dim) if z is not None: assert z.shape == x.shape if dt_bias is not None: assert dt_bias.shape == (nheads, dim) dt = dt + dt_bias dt = F.softplus(dt) if 
dt_softplus else dt dA = torch.exp( rearrange(dt, "b h d -> b h d 1") * A ) # (batch, nheads, dim, dstate) B = repeat(B, "b g n -> b (g h) n", h=nheads // ngroups) # (batch, nheads, dstate) C = repeat(C, "b g n -> b (g h) n", h=nheads // ngroups) # (batch, nheads, dstate) dB = rearrange(dt, "b h d -> b h d 1") * rearrange( B, "b h n -> b h 1 n" ) # (batch, nheads, dim, dstate) state.copy_( state * dA + dB * rearrange(x, "b h d -> b h d 1") ) # (batch, dim, dstate out = torch.einsum("bhdn,bhn->bhd", state.to(C.dtype), C) if D is not None: out += (x * D).to(out.dtype) out = (out if z is None else out * F.silu(z)).to(x.dtype) if not has_heads: out = out.squeeze(1) return out def selective_scan_ref( u, delta, A, B, C, D=None, z=None, delta_bias=None, delta_softplus=False, return_last_state=False, prev_state=None, final_state_out=None, ): """ u: r(B D L) delta: r(B D L) A: c(D N) or r(D N) B: c(D N) or r(B N L) or r(B N 2L) or r(B G N L) or (B G N L) C: c(D N) or r(B N L) or r(B N 2L) or r(B G N L) or (B G N L) D: r(D) z: r(B D L) delta_bias: r(D), fp32 prev_state: r(B D N), fp32 out: r(B D L) last_state (optional): r(B D dstate) or c(B D dstate) """ dtype_in = u.dtype u = u.float() delta = delta.float() if delta_bias is not None: delta = delta + delta_bias[..., None].float() if delta_softplus: delta = F.softplus(delta) batch, dim, dstate = u.shape[0], A.shape[0], A.shape[1] is_variable_B = B.dim() >= 3 is_variable_C = C.dim() >= 3 B = B.float() C = C.float() x = A.new_zeros((batch, dim, dstate)) if prev_state is None else prev_state ys = [] deltaA = torch.exp(torch.einsum("bdl,dn->bdln", delta, A)) if not is_variable_B: deltaB_u = torch.einsum("bdl,dn,bdl->bdln", delta, B, u) else: if B.dim() == 3: deltaB_u = torch.einsum("bdl,bnl,bdl->bdln", delta, B, u) else: B = repeat(B, "B G N L -> B (G H) N L", H=dim // B.shape[1]) deltaB_u = torch.einsum("bdl,bdnl,bdl->bdln", delta, B, u) if is_variable_C and C.dim() == 4: C = repeat(C, "B G N L -> B (G H) N L", H=dim // 
C.shape[1]) for i in range(u.shape[2]): x = deltaA[:, :, i] * x + deltaB_u[:, :, i] if not is_variable_C: y = torch.einsum("bdn,dn->bd", x, C) else: if C.dim() == 3: y = torch.einsum("bdn,bn->bd", x, C[:, :, i]) else: y = torch.einsum("bdn,bdn->bd", x, C[:, :, :, i]) if i == u.shape[2] - 1: if final_state_out is None: final_state_out = x else: final_state_out.copy_(x) ys.append(y) y = torch.stack(ys, dim=2) # (batch dim L) out = y if D is None else y + u * rearrange(D, "d -> d 1") if z is not None: out = out * F.silu(z) out = out.to(dtype=dtype_in) return out if not return_last_state else (out, final_state_out) def selective_scan_opcheck_fn( u, delta, A, B, C, D=None, z=None, delta_bias=None, delta_softplus=False, cu_seq_len=None, cache_indices=None, has_initial_state=None, ssm_states=None, pad_slot_id=PAD_SLOT_ID, block_size=2048, block_idx_first_scheduled_token=None, block_idx_last_scheduled_token=None, initial_state_idx=None, ): """if return_last_state is True, returns (out, last_state) last_state has shape (batch, dim, dstate). """ if u.stride(-1) != 1: u = u.contiguous() if delta.stride(-1) != 1: delta = delta.contiguous() if D is not None: D = D.contiguous() if B.stride(-1) != 1: B = B.contiguous() if C.stride(-1) != 1: C = C.contiguous() if z is not None and z.stride(-1) != 1: z = z.contiguous() if B.dim() == 3 and cu_seq_len is None: B = B.unsqueeze(1) if B.dim() == 2 and cu_seq_len is not None: B = B.unsqueeze(0) if C.dim() == 3 and cu_seq_len is None: C = C.unsqueeze(1) if C.dim() == 2 and cu_seq_len is not None: C = C.unsqueeze(0) # Disable test_autograd_registration for now as it seems to trigger # a bogus error. 
opcheck( torch.ops._C.selective_scan_fwd, ( u, delta, A, B, C, D, z, delta_bias, delta_softplus, cu_seq_len, cache_indices, has_initial_state, ssm_states, pad_slot_id, block_size, block_idx_first_scheduled_token, block_idx_last_scheduled_token, initial_state_idx, ), test_utils=["test_schema", "test_faketensor"], ) @pytest.mark.parametrize("wtype", [torch.float32]) @pytest.mark.parametrize("itype", [torch.float32, torch.bfloat16]) @pytest.mark.parametrize("seqlen", [128, 1024, 4096]) @pytest.mark.parametrize("has_delta_bias", [True]) @pytest.mark.parametrize("delta_softplus", [True]) @pytest.mark.parametrize("has_z", [True]) @pytest.mark.parametrize("has_D", [True]) @pytest.mark.parametrize("varBC_groups", [1, 2]) @pytest.mark.parametrize("is_variable_C", [True]) @pytest.mark.parametrize("is_variable_B", [True]) @pytest.mark.parametrize("scan_chunks", [1, 3]) def test_selective_scan( is_variable_B, is_variable_C, varBC_groups, has_D, has_z, has_delta_bias, delta_softplus, seqlen, itype, wtype, scan_chunks, ): if varBC_groups > 1 and (not is_variable_B or not is_variable_C): pytest.skip() # This config is not applicable device = "cuda" rtol, atol = (6e-4, 2e-3) if itype == torch.float32 else (3e-3, 5e-3) if itype == torch.bfloat16: rtol, atol = 3e-2, 5e-2 rtolw, atolw = (1e-3, 1e-3) if has_z: # If we have z, the errors on the weights seem higher rtolw = max(rtolw, rtol) atolw = max(atolw, atol) # set seed current_platform.seed_everything(0) batch_size = 1 dim = 4 dstate = 8 A = -0.5 * torch.rand(dim, dstate, device=device, dtype=wtype) A_ref = A.clone() if not is_variable_B: B_shape = [dim, dstate] elif varBC_groups == 1: B_shape = [batch_size, dstate, seqlen] else: B_shape = [batch_size, varBC_groups, dstate, seqlen] B = torch.randn(B_shape, device=device, dtype=wtype if not is_variable_B else itype) B_ref = B.clone() if not is_variable_C: C_shape = [dim, dstate] elif varBC_groups == 1: C_shape = [batch_size, dstate, seqlen] else: C_shape = [batch_size, 
varBC_groups, dstate, seqlen] C = torch.randn(C_shape, device=device, dtype=wtype if not is_variable_C else itype) C_ref = C.clone() D = torch.randn(dim, device=device, dtype=torch.float32) if has_D else None D_ref = D.clone() z = ( torch.randn(batch_size, dim, seqlen, device=device, dtype=itype) if has_z else None ) z_ref = z.clone() if has_z else None delta_bias = ( (0.5 * torch.rand(dim, device=device, dtype=torch.float32)) if has_delta_bias else None ) u = torch.randn(batch_size, dim, seqlen, device=device, dtype=itype) u_ref = u.clone() delta = 0.5 * torch.rand(batch_size, dim, seqlen, device=device, dtype=itype) delta_ref = delta.clone() state_shape = (batch_size, u.shape[1], int(A.shape[1])) state = torch.randn(state_shape, device=u.device, dtype=itype, requires_grad=False) state_ref = state.clone() out = None out_ref = None outs = [] for c in range(scan_chunks): chunked_prompt_len = seqlen // scan_chunks chunk_start = chunked_prompt_len * c chunk_end = chunked_prompt_len * (c + 1) if c == scan_chunks - 1: chunk_end = seqlen _B = B if is_variable_B: _B = B[..., chunk_start:chunk_end] _C = C if is_variable_B: _C = C[..., chunk_start:chunk_end] _z = z if has_z: assert z is not None _z = z[..., chunk_start:chunk_end] out = selective_scan_fn( u[..., chunk_start:chunk_end], state, delta[..., chunk_start:chunk_end], A, _B, _C, D, z=_z, delta_bias=delta_bias, delta_softplus=delta_softplus, has_initial_state=torch.ones(batch_size, device=u.device, dtype=torch.bool) if c > 0 else None, pad_slot_id=PAD_SLOT_ID, block_size=2048, block_idx_first_scheduled_token=None, block_idx_last_scheduled_token=None, initial_state_idx=None, ) outs.append(out) if len(outs) > 1: out = torch.cat(outs, dim=-1) out_ref, state_ref, *rest = selective_scan_ref( u_ref, delta_ref, A_ref, B_ref, C_ref, D_ref, z=z_ref, delta_bias=delta_bias, delta_softplus=delta_softplus, return_last_state=True, ) assert out is not None and out_ref is not None assert torch.allclose(out, out_ref, rtol=rtol, 
atol=atol) assert state is not None and state_ref is not None assert torch.allclose(state, state_ref.to(itype), rtol=rtol, atol=atol) selective_scan_opcheck_fn( u, delta, A, B, C, D, z, delta_bias=delta_bias, delta_softplus=delta_softplus, ssm_states=state, block_size=2048, ) @pytest.mark.parametrize("itype", [torch.float32, torch.bfloat16]) @pytest.mark.parametrize("has_z", [False, True]) @pytest.mark.parametrize("dstate", [16, 64]) @pytest.mark.parametrize("dim", [2048, 2048 + 16, 4096]) def test_selective_state_update(dim, dstate, has_z, itype): device = "cuda" rtol, atol = (3e-4, 1e-3) if itype == torch.float32 else (5e-3, 1e-2) if itype == torch.bfloat16: rtol, atol = 1e-2, 5e-2 if torch.version.hip: atol *= 2 # set seed current_platform.seed_everything(0) batch_size = 1 state = torch.randn(batch_size, dim, dstate, dtype=itype, device=device) x = torch.randn(batch_size, dim, device=device, dtype=itype) out = torch.empty_like(x) dt = torch.randn(batch_size, dim, device=device, dtype=itype) dt_bias = torch.rand(dim, device=device) - 4.0 A = -torch.rand(dim, dstate, device=device) - 1.0 B = torch.randn(batch_size, dstate, device=device) C = torch.randn(batch_size, dstate, device=device) D = torch.randn(dim, device=device) z = torch.randn_like(x) if has_z else None state_ref = state.detach().clone() selective_state_update( state, x, dt, A, B, C, D=D, z=z, dt_bias=dt_bias, dt_softplus=True, out=out ) out_ref = selective_state_update_ref( state_ref, x, dt, A, B, C, D=D, z=z, dt_bias=dt_bias, dt_softplus=True ) assert torch.allclose(state, state_ref, rtol=rtol, atol=atol) assert torch.allclose(out, out_ref, rtol=rtol, atol=atol) @pytest.mark.parametrize("itype", [torch.float32, torch.bfloat16]) @pytest.mark.parametrize("has_z", [False, True]) @pytest.mark.parametrize("dstate", [16, 64]) @pytest.mark.parametrize("dim", [2048, 2048 + 16, 4096]) @pytest.mark.parametrize("max_seq_len", [1, 2, 4]) def test_selective_state_update_varlen(dim, dstate, has_z, itype, 
max_seq_len): device = "cuda" rtol, atol = (3e-4, 1e-3) if itype == torch.float32 else (5e-3, 1e-2) if itype == torch.bfloat16: rtol, atol = 5e-2, 1.5e-1 if torch.version.hip: atol *= 2 # set seed current_platform.seed_everything(0) batch_size = 4 token_counts = torch.randint(1, max_seq_len + 1, (batch_size,), device=device) total_tokens = int(token_counts.sum().item()) cu_seqlens = torch.tensor( [0] + torch.cumsum(token_counts, dim=0).tolist(), dtype=torch.int32, device=device, ) state = torch.randn(batch_size, dim, dstate, dtype=itype, device=device) x = torch.randn(total_tokens, dim, device=device, dtype=itype) out = torch.empty_like(x) dt = torch.randn(total_tokens, dim, device=device, dtype=itype) dt_bias = torch.rand(dim, device=device) - 4.0 A = -torch.rand(dim, dstate, device=device) - 1.0 B = torch.randn(total_tokens, dstate, device=device) C = torch.randn(total_tokens, dstate, device=device) D = torch.randn(dim, device=device) z = torch.randn_like(x) if has_z else None state_ref = state.detach().clone() selective_state_update( state, x, dt, A, B, C, D=D, z=z, dt_bias=dt_bias, dt_softplus=True, out=out, cu_seqlens=cu_seqlens, ) out_ref_list = [] for seq_idx in range(batch_size): start_idx = cu_seqlens[seq_idx].item() end_idx = cu_seqlens[seq_idx + 1].item() num_tokens = end_idx - start_idx for token_idx in range(num_tokens): idx = start_idx + token_idx out_ref_list.append( selective_state_update_ref( state_ref[seq_idx : seq_idx + 1], x[idx : idx + 1], dt[idx : idx + 1], A, B[idx : idx + 1], C[idx : idx + 1], D=D, z=z[idx : idx + 1] if has_z else None, dt_bias=dt_bias, dt_softplus=True, ) ) out_ref = torch.cat(out_ref_list, dim=0) assert torch.allclose(state, state_ref, rtol=rtol, atol=atol) assert torch.allclose(out, out_ref, rtol=rtol, atol=atol) @pytest.mark.parametrize("wtype", [torch.float32]) @pytest.mark.parametrize("itype", [torch.float32]) @pytest.mark.parametrize("seqlen", [1, 256, 1024, 4096]) @pytest.mark.parametrize("return_last_state", [True]) 
@pytest.mark.parametrize("has_delta_bias", [True]) @pytest.mark.parametrize("delta_softplus", [True]) @pytest.mark.parametrize("has_z", [True]) @pytest.mark.parametrize("has_D", [True]) @pytest.mark.parametrize("varBC_groups", [1, 2]) @pytest.mark.parametrize("is_variable_C", [True]) @pytest.mark.parametrize("is_variable_B", [True]) # tests correctness in case subset of the sequences are padded @pytest.mark.parametrize("with_padding", [False, True]) def test_selective_scan_varlen( with_padding, is_variable_B, is_variable_C, varBC_groups, has_D, has_z, has_delta_bias, delta_softplus, return_last_state, seqlen, itype, wtype, ): if varBC_groups > 1 and (not is_variable_B or not is_variable_C): pytest.skip() # This config is not applicable device = "cuda" rtol, atol = (6e-4, 2e-3) if itype == torch.float32 else (3e-3, 5e-3) if itype == torch.bfloat16: rtol, atol = 3e-2, 5e-2 rtolw, atolw = (1e-3, 1e-3) if has_z: # If we have z, the errors on the weights seem higher rtolw = max(rtolw, rtol) atolw = max(atolw, atol) # set seed torch.random.manual_seed(0) seqlens = [] batch_size = 4 if seqlen < 10: batch_size = 1 padding = 3 if with_padding else 0 padded_batch_size = batch_size + padding if with_padding and seqlen < padded_batch_size: pytest.skip() nsplits = padded_batch_size - 1 eos_pos = torch.randperm(seqlen - 1)[:nsplits].sort().values seqlens.append( torch.diff( torch.cat([torch.tensor([-1]), eos_pos, torch.tensor([seqlen - 1])]) ).tolist() ) assert sum(seqlens[-1]) == seqlen assert all(s > 0 for s in seqlens[-1]) total_entries = batch_size * 10 cumsum = torch.cumsum(torch.tensor(seqlens[0]), dim=0).to(torch.int32) cumsum = torch.concat([torch.tensor([0], dtype=torch.int32), cumsum], dim=0).cuda() dim = 4 dstate = 8 A = -0.5 * torch.rand(dim, dstate, device=device, dtype=wtype) A_ref = A.clone() B_shape = [varBC_groups, dstate, seqlen] B = torch.randn(B_shape, device=device, dtype=wtype if not is_variable_B else itype) B_ref = B.clone() C_shape = [varBC_groups, 
dstate, seqlen] C = torch.randn(C_shape, device=device, dtype=wtype if not is_variable_C else itype) C_ref = C.clone() D = torch.randn(dim, device=device, dtype=torch.float32) if has_D else None D_ref = D.clone() z = torch.randn(dim, seqlen, device=device, dtype=itype) z_ref = z.clone() delta_bias = ( (0.5 * torch.rand(dim, device=device, dtype=torch.float32)) if has_delta_bias else None ) u = torch.randn(dim, seqlen, device=device, dtype=itype) u_ref = u.clone() delta = 0.5 * torch.rand(dim, seqlen, device=device, dtype=itype) delta_ref = delta.clone() out = None out_ref = None prev_state_shape = (total_entries, u.shape[0], int(A.shape[1])) prev_state = torch.randn( prev_state_shape, device=u.device, dtype=itype, requires_grad=False ) prev_state_ref = prev_state.clone() state_indices = torch.randperm(total_entries, dtype=torch.int32, device=u.device)[ :batch_size ] unused_states_bool = torch.ones(total_entries, dtype=torch.bool, device=device) unused_states_bool[state_indices] = False padded_state_indices = torch.concat( [ state_indices, torch.as_tensor([PAD_SLOT_ID] * padding, dtype=torch.int32, device=device), ], dim=-1, ) has_initial_state = torch.randint( 0, 2, (cumsum.shape[0] - 1,), dtype=torch.bool, device=u.device ) out = selective_scan_fn( u, prev_state, delta, A, B, C, D, z, delta_bias, delta_softplus, cumsum, padded_state_indices, has_initial_state, ) outs_ref = [] splits = [ torch.split(var, seqlens[0], dim=-1) for var in (u_ref, delta_ref, B_ref, C_ref, z_ref) ] for i in range(len(seqlens[0])): u_s, delta_s, B_s, C_s, z_s = (v[i].unsqueeze(0) for v in splits) if padded_state_indices[i] == PAD_SLOT_ID: continue out_ref_s, _ = selective_scan_ref( u_s, delta_s, A_ref, B_s, C_s, D_ref, z=z_s, delta_bias=delta_bias, delta_softplus=delta_softplus, return_last_state=return_last_state, prev_state=prev_state_ref[padded_state_indices[i]].unsqueeze(0) if has_initial_state[i] else None, final_state_out=prev_state_ref[padded_state_indices[i]].unsqueeze(0), ) 
outs_ref.append(out_ref_s) out_ref = torch.cat(outs_ref, dim=-1)[0] unpadded_out = out[:, : out_ref[0].shape[-1]] print("Output diff max", (unpadded_out - out_ref).max()) print("Output diff mean", (unpadded_out - out_ref).mean()) print("Output state diff max", (prev_state - prev_state_ref).max()) print("Output state diff mean", (prev_state - prev_state_ref).mean()) assert torch.allclose(prev_state, prev_state_ref, rtol=rtol, atol=atol) assert torch.allclose(unpadded_out, out_ref, rtol=rtol, atol=atol) selective_scan_opcheck_fn( u, delta, A, B, C, D, z, delta_bias, delta_softplus, cumsum, padded_state_indices, has_initial_state, prev_state, block_size=2048, ) @pytest.mark.parametrize("itype", [torch.float32, torch.bfloat16]) @pytest.mark.parametrize("has_z", [True]) @pytest.mark.parametrize("dstate", [16, 64]) @pytest.mark.parametrize("dim", [2048, 2048 + 16, 4096]) # tests correctness in case subset of the sequences are padded @pytest.mark.parametrize("with_padding", [True, False]) def test_selective_state_update_with_batch_indices( with_padding, dim, dstate, has_z, itype ): device = "cuda" rtol, atol = (3e-4, 1e-3) if itype == torch.float32 else (5e-3, 1e-2) if itype == torch.bfloat16: rtol, atol = 1e-1, 1e-1 if torch.version.hip: atol *= 2 # set seed torch.random.manual_seed(0) batch_size = 3 padding = 5 if with_padding else 0 padded_batch_size = batch_size + padding total_entries = 10 * batch_size state = torch.randn(total_entries, dim, dstate, dtype=itype, device=device) state_indices = torch.randperm(total_entries)[:batch_size].to( dtype=torch.int32, device=device ) unused_states_bool = torch.ones(total_entries, dtype=torch.bool, device=device) unused_states_bool[state_indices] = False padded_state_indices = torch.concat( [ state_indices, torch.as_tensor([PAD_SLOT_ID] * padding, dtype=torch.int32, device=device), ], dim=0, ) x = torch.randn(padded_batch_size, dim, device=device, dtype=itype) out = torch.empty_like(x) dt = torch.randn(padded_batch_size, dim, 
device=device, dtype=itype) dt_bias = torch.rand(dim, device=device) - 4.0 A = -torch.rand(dim, dstate, device=device) - 1.0 B = torch.randn(padded_batch_size, dstate, device=device) C = torch.randn(padded_batch_size, dstate, device=device) D = torch.randn(dim, device=device) z = torch.randn_like(x) if has_z else None state_ref = state[state_indices, :].clone() state_before = state.clone() selective_state_update( state, x, dt, A, B, C, D=D, z=z, dt_bias=dt_bias, dt_softplus=True, state_batch_indices=padded_state_indices, pad_slot_id=PAD_SLOT_ID, out=out, ) out_ref = selective_state_update_ref( state_ref, x[:batch_size], dt[:batch_size], A, B[:batch_size], C[:batch_size], D=D, z=z[:batch_size], dt_bias=dt_bias, dt_softplus=True, ) print("Output diff max", (out[:batch_size] - out_ref).max()) print("Output diff mean", (out[:batch_size] - out_ref).mean()) print("Output state diff max", (state[state_indices, :] - state_ref).max()) print("Output state diff mean", (state[state_indices, :] - state_ref).mean()) # test padded entries stay the same if with_padding: assert torch.equal(state_before[unused_states_bool], state[unused_states_bool]) assert torch.equal(x[batch_size + 1 :], x[batch_size + 1 :]) assert torch.equal(dt[batch_size + 1 :], dt[batch_size + 1 :]) assert torch.equal(B[batch_size + 1 :], B[batch_size + 1 :]) assert torch.equal(C[batch_size + 1 :], C[batch_size + 1 :]) # test "real" entries assert torch.allclose(state[state_indices, :], state_ref, rtol=rtol, atol=atol) assert torch.allclose(out[:batch_size], out_ref, rtol=rtol, atol=atol) @pytest.mark.parametrize("itype", [torch.float32, torch.bfloat16]) @pytest.mark.parametrize("has_z", [False, True]) @pytest.mark.parametrize("tie_hdim", [False, True]) @pytest.mark.parametrize("ngroups", [1, 4]) @pytest.mark.parametrize("dstate", [16, 64]) @pytest.mark.parametrize("dim", [2048, 4096]) def test_selective_state_update_with_heads_with_batch_indices( dim, dstate, ngroups, has_z, tie_hdim, itype ): device = "cuda" 
rtol, atol = (3e-4, 1e-3) if itype == torch.float32 else (5e-3, 3e-2) if itype == torch.bfloat16: rtol, atol = 1e-1, 1e-1 # set seed torch.random.manual_seed(0) batch_size = 3 headdim = 64 nheads = dim // headdim total_entries = 10 * batch_size state = torch.randn( total_entries, nheads, headdim, dstate, dtype=itype, device=device ) state_indices = torch.randperm(total_entries)[:batch_size].to( dtype=torch.int32, device=device ) x = torch.randn(batch_size, nheads, headdim, device=device, dtype=itype) out = torch.empty_like(x) if not tie_hdim: dt = torch.randn(batch_size, nheads, headdim, device=device, dtype=itype) dt_bias = torch.rand(nheads, headdim, device=device) - 4.0 A = -torch.rand(nheads, headdim, dstate, device=device) - 1.0 D = torch.randn(nheads, headdim, device=device) else: dt = repeat( torch.randn(batch_size, nheads, device=device, dtype=itype), "b h -> b h p", p=headdim, ) dt_bias = repeat(torch.rand(nheads, device=device) - 4.0, "h -> h p", p=headdim) A = repeat( -torch.rand(nheads, device=device) - 1.0, "h -> h p n", p=headdim, n=dstate ) D = repeat(torch.randn(nheads, device=device), "h -> h p", p=headdim) B = torch.randn(batch_size, ngroups, dstate, device=device) C = torch.randn(batch_size, ngroups, dstate, device=device) z = torch.randn_like(x) if has_z else None state_ref = state[state_indices, :].detach().clone() selective_state_update( state, x, dt, A, B, C, D=D, z=z, dt_bias=dt_bias, dt_softplus=True, state_batch_indices=state_indices, pad_slot_id=PAD_SLOT_ID, out=out, ) out_ref = selective_state_update_ref( state_ref, x, dt, A, B, C, D=D, z=z, dt_bias=dt_bias, dt_softplus=True ) print(f"Output max diff: {(out - out_ref).abs().max().item()}") print(f"Output mean diff: {(out - out_ref).abs().mean().item()}") assert torch.allclose(state[state_indices, :], state_ref, rtol=rtol, atol=atol) assert torch.allclose(out, out_ref, rtol=rtol, atol=atol) @pytest.mark.parametrize("itype", [torch.float32, torch.bfloat16]) 
@pytest.mark.parametrize("has_z", [False, True]) @pytest.mark.parametrize("dstate", [16, 64]) @pytest.mark.parametrize("dim", [2048, 4096]) @pytest.mark.parametrize("max_seq_len", [2, 4]) def test_selective_state_update_with_num_accepted_tokens( dim, dstate, has_z, itype, max_seq_len ): device = "cuda" rtol, atol = (3e-4, 1e-3) if itype == torch.float32 else (5e-3, 1e-2) if itype == torch.bfloat16: rtol, atol = 5e-2, 1.5e-1 if torch.version.hip: atol *= 2 current_platform.seed_everything(0) batch_size = 4 tokens_per_seq = torch.randint(1, max_seq_len + 1, (batch_size,), device=device) total_tokens = int(tokens_per_seq.sum().item()) num_accepted_tokens = torch.randint(0, max_seq_len, (batch_size,), device=device) num_accepted_tokens[0] = 0 # Add edge-case of no accepted tokens num_accepted_tokens[1] = max_seq_len # Add edge-case of all tokens accepted cu_seqlens = torch.tensor( [0] + torch.cumsum(tokens_per_seq, dim=0).tolist(), dtype=torch.int32, device=device, ) total_state_slots = 50 state = torch.randn(total_state_slots, dim, dstate, dtype=itype, device=device) state_batch_indices = torch.full( (batch_size, max_seq_len), PAD_SLOT_ID, dtype=torch.int32, device=device ) initial_state_slots = torch.randint( 0, 15, (batch_size,), device=device, dtype=torch.int32 ) for seq_idx in range(batch_size): token_pos = max(num_accepted_tokens[seq_idx].item() - 1, 0) state_batch_indices[seq_idx, token_pos] = initial_state_slots[seq_idx] dst_state_batch_indices = torch.full( (batch_size, max_seq_len), PAD_SLOT_ID, dtype=torch.int32, device=device ) slot_offset = 15 dst_slots_map = {} for seq_idx in range(batch_size): for token_idx in range(tokens_per_seq[seq_idx].item()): dst_state_batch_indices[seq_idx, token_idx] = slot_offset dst_slots_map[(seq_idx, token_idx)] = slot_offset slot_offset += 1 x = torch.randn(total_tokens, dim, device=device, dtype=itype) out = torch.empty_like(x) dt = torch.randn(total_tokens, dim, device=device, dtype=itype) dt_bias = torch.rand(dim, 
device=device) - 4.0 A = -torch.rand(dim, dstate, device=device) - 1.0 B = torch.randn(total_tokens, dstate, device=device) C = torch.randn(total_tokens, dstate, device=device) D = torch.randn(dim, device=device) z = torch.randn_like(x) if has_z else None state_ref_intermediate = {} out_ref_list = [] for seq_idx in range(batch_size): seq_start = cu_seqlens[seq_idx].item() seq_end = cu_seqlens[seq_idx + 1].item() num_tokens = seq_end - seq_start token_pos = max(num_accepted_tokens[seq_idx].item() - 1, 0) initial_slot = state_batch_indices[seq_idx, token_pos].item() state_seq = state[initial_slot : initial_slot + 1].clone() for token_idx in range(num_tokens): global_idx = seq_start + token_idx out_token = selective_state_update_ref( state_seq, x[global_idx : global_idx + 1], dt[global_idx : global_idx + 1], A, B[global_idx : global_idx + 1], C[global_idx : global_idx + 1], D=D, z=z[global_idx : global_idx + 1] if has_z else None, dt_bias=dt_bias, dt_softplus=True, ) out_ref_list.append(out_token) state_ref_intermediate[(seq_idx, token_idx)] = state_seq.clone() out_ref = torch.cat(out_ref_list, dim=0) selective_state_update( state, x, dt, A, B, C, D=D, z=z, dt_bias=dt_bias, dt_softplus=True, out=out, cu_seqlens=cu_seqlens, state_batch_indices=state_batch_indices, dst_state_batch_indices=dst_state_batch_indices, num_accepted_tokens=num_accepted_tokens, pad_slot_id=PAD_SLOT_ID, ) assert torch.allclose(out, out_ref, rtol=rtol, atol=atol) for seq_idx in range(batch_size): num_tokens = tokens_per_seq[seq_idx].item() for token_idx in range(num_tokens): dst_slot = dst_slots_map[(seq_idx, token_idx)]
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
true
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/mamba/test_mamba_mixer2.py
tests/kernels/mamba/test_mamba_mixer2.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import unittest import pytest import torch from tests.utils import multi_gpu_test from vllm.distributed.parallel_state import ( init_distributed_environment, initialize_model_parallel, ) from vllm.model_executor.layers.mamba.mamba_mixer2 import Mixer2RMSNormGated from vllm.platforms import current_platform from vllm.utils.system_utils import update_environment_variables @multi_gpu_test(num_gpus=2) @pytest.mark.parametrize("batch_size", [8]) @pytest.mark.parametrize("seq_len", [128]) @pytest.mark.parametrize( "hidden_size_n_groups", [ (64, 1), (64, 2), (64, 4), # hidden_size be divisible by num_gpus ], ) @pytest.mark.parametrize("dtype", [torch.float16]) def test_mixer2_gated_norm_multi_gpu( batch_size: int, seq_len: int, hidden_size_n_groups: tuple[int, int], dtype: torch.dtype, device: str = "cuda", ): hidden_size, n_groups = hidden_size_n_groups num_processes = 2 def run_torch_spawn(fn, nprocs): # need to use torch.mp.spawn otherwise will have problems with # torch.distributed and cuda torch.multiprocessing.spawn( fn, args=( num_processes, batch_size, seq_len, hidden_size, n_groups, dtype, device, ), nprocs=nprocs, ) run_torch_spawn(mixer2_gated_norm_tensor_parallel, 2) def mixer2_gated_norm_tensor_parallel( local_rank: int, world_size: int, batch_size: int, seq_len: int, hidden_size: int, n_groups: int, dtype: torch.dtype, device: str, ): current_platform.seed_everything(0) device = torch.device(f"cuda:{local_rank}") torch.cuda.set_device(device) torch.set_default_device(device) torch.set_default_dtype(dtype) update_environment_variables( { "RANK": str(local_rank), "LOCAL_RANK": str(local_rank), "WORLD_SIZE": str(world_size), "MASTER_ADDR": "localhost", "MASTER_PORT": "12345", } ) # initialize distributed init_distributed_environment() initialize_model_parallel(tensor_model_parallel_size=world_size) # create random weights an inputs weight = 
torch.rand((hidden_size,), dtype=dtype, device=device) hidden_states = torch.randn(batch_size, seq_len, hidden_size) gate_states = torch.randn(batch_size, seq_len, hidden_size) # create gated-norm with TP mixer = Mixer2RMSNormGated( full_hidden_size=hidden_size, full_n_groups=n_groups, ) mixer.weight.weight_loader(mixer.weight, weight) # load # create gated-norm without TP to compute reference # - utilize mock patching to disable TP when with ( unittest.mock.patch( "vllm.model_executor.layers.mamba.mamba_mixer2." "get_tensor_model_parallel_world_size", return_value=1, ), unittest.mock.patch( "vllm.model_executor.layers.mamba.mamba_mixer2." "get_tensor_model_parallel_rank", return_value=0, ), ): mixer_single_gpu = Mixer2RMSNormGated( full_hidden_size=hidden_size, full_n_groups=n_groups, ) # assign weight to single-gpu mixer mixer_single_gpu.weight.data = weight # generate and compare N = hidden_size // world_size output = mixer( hidden_states[..., local_rank * N : (local_rank + 1) * N], gate_states[..., local_rank * N : (local_rank + 1) * N], ) ref_output = mixer_single_gpu(hidden_states, gate_states) torch.testing.assert_close( output, ref_output[..., local_rank * N : (local_rank + 1) * N], atol=5e-3, rtol=1e-3, )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/core/test_apply_rotary_emb.py
tests/kernels/core/test_apply_rotary_emb.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ Tests for ApplyRotaryEmb CustomOp dispatch behavior. This test ensures that RotaryEmbedding classes correctly call the appropriate ApplyRotaryEmb methods based on the calling context: 1. RotaryEmbedding.forward_native() -> ApplyRotaryEmb.forward_native() 2. RotaryEmbedding.forward_cuda() -> ApplyRotaryEmb.forward() (auto-dispatch) 3. RotaryEmbedding.forward_hip() -> ApplyRotaryEmb.forward() (auto-dispatch) """ from dataclasses import dataclass import pytest import torch from vllm.config import ( CompilationConfig, VllmConfig, get_cached_compilation_config, set_current_vllm_config, ) from vllm.platforms import current_platform CUDA_DEVICES = ["cuda:0"] @dataclass class RotaryEmbeddingTestCase: """Test case configuration for RotaryEmbedding dispatch tests.""" name: str rope_class: type rope_kwargs: dict method_name: str # forward_native, forward_cuda, forward positions_shape: tuple # (num_tokens,) or (3, num_tokens) or (4, num_tokens) expect_forward_native: bool # Should call ApplyRotaryEmb.forward_native() expect_forward: bool # Should call ApplyRotaryEmb.forward() def get_test_cases() -> list[RotaryEmbeddingTestCase]: """Generate test cases for all RotaryEmbedding classes.""" from vllm.model_executor.layers.rotary_embedding.ernie45_vl_rope import ( Ernie4_5_VLRotaryEmbedding, ) from vllm.model_executor.layers.rotary_embedding.mrope import MRotaryEmbedding from vllm.model_executor.layers.rotary_embedding.xdrope import XDRotaryEmbedding common_kwargs = { "head_size": 128, "rotary_dim": 128, "max_position_embeddings": 4096, "base": 10000, "is_neox_style": True, "dtype": torch.bfloat16, } return [ # MRotaryEmbedding tests RotaryEmbeddingTestCase( name="MRotaryEmbedding.forward_native", rope_class=MRotaryEmbedding, rope_kwargs={**common_kwargs, "mrope_section": [16, 24, 24]}, method_name="forward_native", positions_shape=(3, 32), # 2D for multimodal 
expect_forward_native=True, expect_forward=False, ), RotaryEmbeddingTestCase( name="MRotaryEmbedding.forward_cuda_1d", rope_class=MRotaryEmbedding, rope_kwargs={**common_kwargs, "mrope_section": [16, 24, 24]}, method_name="forward_cuda", positions_shape=(32,), # 1D triggers apply_rotary_emb path expect_forward_native=False, expect_forward=True, ), # XDRotaryEmbedding tests RotaryEmbeddingTestCase( name="XDRotaryEmbedding.forward", rope_class=XDRotaryEmbedding, rope_kwargs={ **common_kwargs, "scaling_alpha": 1.0, "xdrope_section": [16, 16, 16, 16], }, method_name="forward", positions_shape=(4, 32), # 4D for P/W/H/T expect_forward_native=False, expect_forward=True, ), # Ernie4_5_VLRotaryEmbedding tests RotaryEmbeddingTestCase( name="Ernie4_5_VLRotaryEmbedding.forward_native", rope_class=Ernie4_5_VLRotaryEmbedding, rope_kwargs={**common_kwargs, "mrope_section": [22, 22, 20]}, method_name="forward_native", positions_shape=(3, 32), # 2D for multimodal expect_forward_native=True, expect_forward=False, ), ] def run_dispatch_test( test_case: RotaryEmbeddingTestCase, device: str, ): """Run a dispatch test for a RotaryEmbedding class.""" vllm_config = VllmConfig( compilation_config=CompilationConfig(custom_ops=["all", "+apply_rotary_emb"]) ) get_cached_compilation_config.cache_clear() with set_current_vllm_config(vllm_config): rope = test_case.rope_class(**test_case.rope_kwargs).to(device=device) apply_rotary_emb = rope.apply_rotary_emb # Verify custom op is enabled if test_case.expect_forward_native: assert ( apply_rotary_emb._forward_method != apply_rotary_emb.forward_native ), "Test setup error: ApplyRotaryEmb custom op should be enabled" # Setup call tracking call_tracker = {"forward_native_called": False, "forward_called": False} original_forward_native = apply_rotary_emb.forward_native original_forward = apply_rotary_emb.forward def tracked_forward_native(*args, **kwargs): call_tracker["forward_native_called"] = True return original_forward_native(*args, **kwargs) def 
tracked_forward(*args, **kwargs): call_tracker["forward_called"] = True return original_forward(*args, **kwargs) apply_rotary_emb.forward_native = tracked_forward_native apply_rotary_emb.forward = tracked_forward try: num_tokens = test_case.positions_shape[-1] num_q_heads = 8 num_kv_heads = 2 head_size = test_case.rope_kwargs["head_size"] max_position = test_case.rope_kwargs["max_position_embeddings"] positions = torch.randint( 0, max_position // 4, test_case.positions_shape, device=device ) query = torch.randn( num_tokens, num_q_heads * head_size, dtype=torch.bfloat16, device=device ) key = torch.randn( num_tokens, num_kv_heads * head_size, dtype=torch.bfloat16, device=device, ) # Call the method under test method = getattr(rope, test_case.method_name) method(positions, query.clone(), key.clone()) # Verify expectations if test_case.expect_forward_native: assert call_tracker["forward_native_called"], ( f"{test_case.name} should call ApplyRotaryEmb.forward_native()" ) if not test_case.expect_forward: assert not call_tracker["forward_called"], ( f"{test_case.name} should NOT call ApplyRotaryEmb.forward(). " "Bug: when +apply_rotary_emb is enabled, forward_native() " "incorrectly dispatches to CUDA/HIP kernels." ) if test_case.expect_forward: assert call_tracker["forward_called"], ( f"{test_case.name} should call ApplyRotaryEmb.forward()" ) finally: apply_rotary_emb.forward_native = original_forward_native apply_rotary_emb.forward = original_forward @pytest.mark.skipif( not current_platform.is_cuda_alike(), reason="Skipping CUDA/ROCm only tests." ) @pytest.mark.parametrize("test_case", get_test_cases(), ids=lambda tc: tc.name) @pytest.mark.parametrize("device", CUDA_DEVICES) def test_rotary_embedding_dispatch( test_case: RotaryEmbeddingTestCase, device: str, ): """ Test that RotaryEmbedding classes dispatch to the correct ApplyRotaryEmb method. 
- forward_native methods should call ApplyRotaryEmb.forward_native() - forward_cuda/forward methods should call ApplyRotaryEmb.forward() """ run_dispatch_test(test_case, device)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/core/test_pos_encoding.py
tests/kernels/core/test_pos_encoding.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from collections.abc import Callable from itertools import product import pytest import torch from tests.kernels.allclose_default import get_default_atol, get_default_rtol from vllm.model_executor.layers.rotary_embedding import get_rope from vllm.platforms import current_platform IS_NEOX_STYLE = [True, False] DTYPES = [torch.bfloat16, torch.float] HEAD_SIZES = [64, 80, 120, 256] ROTARY_DIMS = [None, 32] # None means rotary dim == head size NUM_HEADS = [17] # Arbitrary values for testing BATCH_SIZES = [5] # Arbitrary values for testing SEQ_LENS = [11, 8192] # Arbitrary values for testing SEEDS = [0] CUDA_DEVICES = [f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2)] USE_KEY = [True, False] def _get_flat_tensor_shape( batch_size: int, seq_len: int, num_heads: int, head_size: int ) -> tuple[int, ...]: return (batch_size, seq_len, num_heads * head_size) # For testing sliced tensors def _get_padded_tensor_shape( batch_size: int, seq_len: int, num_heads: int, head_size: int ) -> tuple[int, ...]: return (batch_size, seq_len, num_heads, head_size + 64) def _get_batch_tensor_shape( batch_size: int, seq_len: int, num_heads: int, head_size: int ) -> tuple[int, ...]: return (batch_size, seq_len, num_heads, head_size) TENSORS_SHAPES_FN = [ _get_batch_tensor_shape, _get_flat_tensor_shape, _get_padded_tensor_shape, ] @pytest.mark.parametrize("is_neox_style", IS_NEOX_STYLE) @pytest.mark.parametrize("tensor_shape_fn", TENSORS_SHAPES_FN) @pytest.mark.parametrize("batch_size", BATCH_SIZES) @pytest.mark.parametrize("seq_len", SEQ_LENS) @pytest.mark.parametrize("num_heads", NUM_HEADS) @pytest.mark.parametrize("head_size", HEAD_SIZES) @pytest.mark.parametrize("rotary_dim", ROTARY_DIMS) @pytest.mark.parametrize("dtype", DTYPES) @pytest.mark.parametrize("seed", SEEDS) @pytest.mark.parametrize("device", CUDA_DEVICES) @pytest.mark.parametrize("use_key", USE_KEY) 
@torch.inference_mode() def test_rotary_embedding( is_neox_style: bool, tensor_shape_fn: Callable[[int, int, int, int], tuple[int, ...]], batch_size: int, seq_len: int, num_heads: int, head_size: int, rotary_dim: int | None, dtype: torch.dtype, seed: int, device: str, use_key: bool, max_position: int = 8192, rope_theta: float = 10000, ) -> None: if rotary_dim is None: rotary_dim = head_size current_platform.seed_everything(seed) torch.set_default_device(device) if rotary_dim is None: rotary_dim = head_size rope_parameters = { "rope_type": "default", "rope_theta": rope_theta, "partial_rotary_factor": rotary_dim / head_size, } rope = get_rope(head_size, max_position, is_neox_style, rope_parameters) rope = rope.to(dtype=dtype, device=torch.get_default_device()) positions = torch.randint(0, max_position, (batch_size, seq_len)) query_shape = tensor_shape_fn(batch_size, seq_len, num_heads, head_size) query = torch.randn(query_shape, dtype=dtype) key = torch.randn_like(query) if use_key else None # slice tensor if required, noop otherwise query = query[..., :head_size] key = key[..., :head_size] if use_key else None # NOTE(woosuk): The reference implementation should be executed first # because the custom kernel is in-place. ref_query, ref_key = rope.forward_native(positions, query, key) out_query, out_key = rope.forward(positions, query, key) # Compare the results. 
torch.testing.assert_close( out_query, ref_query, atol=get_default_atol(out_query), rtol=get_default_rtol(out_query), ) if use_key: torch.testing.assert_close( out_key, ref_key, atol=get_default_atol(out_key), rtol=get_default_rtol(out_key), ) else: assert ref_key is None and out_key is None, "expected returned key to be None" @torch.inference_mode() def test_rope_module_cache(): MAX_POSITIONS = [123, 1234] ROPE_THETAS = [10000, 1000000] ROPE_PARAMETERS = ( {"rope_type": "default"}, {"rope_type": "linear", "factor": (1,)}, {"rope_type": "dynamic", "factor": 1}, ) settings = ( HEAD_SIZES, ROTARY_DIMS, MAX_POSITIONS, ROPE_THETAS, IS_NEOX_STYLE, ROPE_PARAMETERS, DTYPES, ) rope_setting_id_map: dict[str, int] = {} for setting in product(*settings): ( head_size, rotary_dim, max_position, rope_theta, is_neox_style, rope_parameters, dtype, ) = setting if rotary_dim is None: rotary_dim = head_size rope_parameters["rope_theta"] = rope_theta rope_parameters["partial_rotary_factor"] = rotary_dim / head_size rope = get_rope( head_size, max_position, is_neox_style, rope_parameters, dtype, ) # different settings cannot share the same rope module assert id(rope) not in rope_setting_id_map.values() assert all(x.dtype == dtype for x in rope.buffers()) assert all(x.dtype == dtype for x in rope.parameters()) rope_setting_id_map[str(setting)] = id(rope) for setting in product(*settings): ( head_size, rotary_dim, max_position, rope_theta, is_neox_style, rope_parameters, dtype, ) = setting if rotary_dim is None: rotary_dim = head_size rope_parameters["rope_theta"] = rope_theta rope_parameters["partial_rotary_factor"] = rotary_dim / head_size rope = get_rope( head_size, max_position, is_neox_style, rope_parameters, dtype, ) # check if cache take effect assert id(rope) == rope_setting_id_map[str(setting)]
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/core/test_mrope.py
tests/kernels/core/test_mrope.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from typing import NamedTuple import pytest import torch from packaging.version import Version from transformers import __version__ as TRANSFORMERS_VERSION from vllm.model_executor.layers.rotary_embedding import get_rope from vllm.platforms import current_platform from vllm.transformers_utils.config import get_config device = torch.device("cuda" if torch.cuda.is_available() else "cpu") def generate_test_data( num_tokens: int, num_q_heads: int, num_kv_heads: int, head_size: int, max_position_embeddings: int, dtype: torch.dtype, device: torch.device, ): """Generate test data for given configuration.""" current_platform.seed_everything(42) # Create 2D positions (3, num_tokens) for multimodal case positions = torch.randint( 0, max_position_embeddings // 4, (3, num_tokens), device=device ) # Create query and key tensors query = torch.randn(num_tokens, num_q_heads * head_size, dtype=dtype, device=device) key = torch.randn(num_tokens, num_kv_heads * head_size, dtype=dtype, device=device) return positions, query, key class MRoPETestInfo(NamedTuple): model_name: str # https://github.com/pytorch/pytorch/blob/main/torch/testing/_comparison.py#L1317 atol: float = 1e-2 rtol: float = 1.6e-2 marks: list[pytest.MarkDecorator] = [] TRANSFORMERS_BASE_VERSION = Version(TRANSFORMERS_VERSION).base_version MODELS_TO_TEST = [ MRoPETestInfo(model_name="zai-org/GLM-4.1V-9B-Thinking"), MRoPETestInfo(model_name="Qwen/Qwen2-VL-7B-Instruct"), MRoPETestInfo(model_name="Qwen/Qwen2-VL-72B-Instruct"), MRoPETestInfo(model_name="Qwen/Qwen2.5-VL-72B-Instruct"), MRoPETestInfo( model_name="Qwen/Qwen3-VL-4B-Instruct", marks=[ pytest.mark.skipif( Version(TRANSFORMERS_BASE_VERSION) < Version("4.57.0"), reason="Qwen3-VL only available after Transformers v4.57", ) ], ), MRoPETestInfo( model_name="Qwen/Qwen3-VL-30B-A3B-Instruct", marks=[ pytest.mark.skipif( Version(TRANSFORMERS_BASE_VERSION) < 
Version("4.57.0"), reason="Qwen3-VL only available after Transformers v4.57", ) ], ), ] num_tokens_list = [11, 8192] @pytest.mark.skipif( not current_platform.is_cuda_alike(), reason="Skipping CUDA/ROCm only tests." ) @pytest.mark.parametrize( "model_info, model_name", [ pytest.param(test_config, test_config.model_name, marks=test_config.marks) for test_config in MODELS_TO_TEST ], ) @pytest.mark.parametrize("tp_size", [1, 2]) @pytest.mark.parametrize("dtype", [torch.bfloat16]) @pytest.mark.parametrize("num_tokens", num_tokens_list) def test_mrope( model_name: str, model_info: MRoPETestInfo, tp_size: int, dtype: torch.dtype, num_tokens: int, ): atol = model_info.atol rtol = model_info.rtol config = get_config(model_name, False).get_text_config() # get the model config total_num_kv_heads = config.num_key_value_heads total_num_heads = config.num_attention_heads num_heads = total_num_heads // tp_size num_kv_heads = max(1, total_num_kv_heads // tp_size) head_dim = ( config.head_dim if hasattr(config, "head_dim") else config.hidden_size // total_num_heads ) is_neox_style = True max_position = config.max_position_embeddings mrope_helper_class = get_rope( head_size=head_dim, max_position=max_position, is_neox_style=is_neox_style, rope_parameters=config.rope_parameters, dtype=dtype, ).to(device=device) # create q k v input tensors # create rotary pos emb input tensors positions, query, key = generate_test_data( num_tokens, num_heads, num_kv_heads, head_dim, max_position, dtype, device ) query_native, key_native = mrope_helper_class.forward_native( positions, query.clone(), key.clone(), ) query_cuda, key_cuda = mrope_helper_class.forward_cuda( positions, query.clone(), key.clone(), ) torch.testing.assert_close(query_native, query_cuda, atol=atol, rtol=rtol) torch.testing.assert_close(key_native, key_cuda, atol=atol, rtol=rtol) @pytest.mark.skipif( not current_platform.is_cuda_alike(), reason="Skipping CUDA/ROCm only tests." 
) @pytest.mark.parametrize( "model_info, model_name", [ pytest.param(test_config, test_config.model_name, marks=test_config.marks) for test_config in MODELS_TO_TEST ], ) @pytest.mark.parametrize("tp_size", [1, 2]) @pytest.mark.parametrize("dtype", [torch.bfloat16]) @pytest.mark.parametrize("num_tokens", num_tokens_list) def test_mrope_torch_compile_tracing( model_name: str, model_info: MRoPETestInfo, tp_size: int, dtype: torch.dtype, num_tokens: int, ): atol = model_info.atol rtol = model_info.rtol config = get_config(model_name, False).get_text_config() # get the model config total_num_kv_heads = config.num_key_value_heads total_num_heads = config.num_attention_heads num_heads = total_num_heads // tp_size num_kv_heads = max(1, total_num_kv_heads // tp_size) head_dim = ( config.head_dim if hasattr(config, "head_dim") else config.hidden_size // total_num_heads ) is_neox_style = True max_position = config.max_position_embeddings mrope_helper_class = get_rope( head_size=head_dim, max_position=max_position, is_neox_style=is_neox_style, rope_parameters=config.rope_parameters, dtype=dtype, ).to(device=device) # Generate test data positions, query, key = generate_test_data( num_tokens, num_heads, num_kv_heads, head_dim, max_position, dtype, device ) # Create a wrapper that makes the in-place function appear functional def functional_forward_cuda(pos, q, k): """Wrapper that converts in-place operation to functional style CUDA Graph does not support in-place operations. This wrapper creates working copies of the input tensors and modifies them. 
""" q_work = q.clone() # Create working copies k_work = k.clone() # Your in-place function modifies q_work and k_work mrope_helper_class.forward_cuda(pos, q_work, k_work) return q_work, k_work # Return the modified tensors # Get reference results query_native, key_native = mrope_helper_class.forward_native( positions, query.clone(), key.clone(), ) try: compiled_forward_cuda = torch.compile( functional_forward_cuda, fullgraph=True, backend="inductor", mode="reduce-overhead", dynamic=False, ) # Run compiled version query_compiled_cuda, key_compiled_cuda = compiled_forward_cuda( positions, query, key, ) # Run original version for comparison query_cuda = query.clone() key_cuda = key.clone() mrope_helper_class.forward_cuda(positions, query_cuda, key_cuda) # Verify results torch.testing.assert_close( query_compiled_cuda, query_cuda, atol=atol, rtol=rtol ) torch.testing.assert_close(key_compiled_cuda, key_cuda, atol=atol, rtol=rtol) torch.testing.assert_close( query_compiled_cuda, query_native, atol=atol, rtol=rtol ) torch.testing.assert_close(key_compiled_cuda, key_native, atol=atol, rtol=rtol) print("✓ forward_cuda successfully traced with torch.compile inductor") except Exception as e: pytest.fail(f"forward_cuda failed to trace with torch.compile inductor: {e}")
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/core/test_layernorm.py
tests/kernels/core/test_layernorm.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import pytest import torch from tests.kernels.quant_utils import FP8_DTYPE from tests.kernels.utils import opcheck from vllm.model_executor.layers.layernorm import RMSNorm from vllm.platforms import current_platform DTYPES = [torch.half, torch.bfloat16, torch.float] NUM_TOKENS = [7, 83, 4096] # Arbitrary values for testing HIDDEN_SIZES = [8, 768, 769, 5120, 5125, 8192] # Arbitrary values for testing ADD_RESIDUAL = [False, True] SEEDS = [0] CUDA_DEVICES = [f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2)] @pytest.mark.parametrize("num_tokens", NUM_TOKENS) @pytest.mark.parametrize("hidden_size", HIDDEN_SIZES) @pytest.mark.parametrize("add_residual", ADD_RESIDUAL) @pytest.mark.parametrize("dtype", DTYPES) @pytest.mark.parametrize("seed", SEEDS) @pytest.mark.parametrize("device", CUDA_DEVICES) @pytest.mark.parametrize("strided_input", [False, True]) @torch.inference_mode() def test_rms_norm( num_tokens: int, hidden_size: int, add_residual: bool, dtype: torch.dtype, seed: int, device: str, strided_input: bool, ) -> None: current_platform.seed_everything(seed) torch.set_default_device(device) layer = RMSNorm(hidden_size).to(dtype=dtype) layer.weight.data.normal_(mean=1.0, std=0.1) scale = 1 / (2 * hidden_size) last_dim = 2 * hidden_size if strided_input else hidden_size x = torch.randn(num_tokens, last_dim, dtype=dtype) x = x[..., :hidden_size] assert x.is_contiguous() != strided_input x *= scale residual = torch.randn_like(x) * scale if add_residual else None # NOTE(woosuk): The reference implementation should be executed first # because the custom kernel is in-place. ref_out = layer.forward_native(x, residual) out = layer(x, residual) # NOTE(woosuk): LayerNorm operators (including RMS) typically have larger # numerical errors than other operators because they involve reductions. # Therefore, we use a larger tolerance. 
if add_residual: torch.testing.assert_close(out[0], ref_out[0], atol=1e-2, rtol=1e-2) torch.testing.assert_close(out[1], ref_out[1], atol=1e-2, rtol=1e-2) else: torch.testing.assert_close(out, ref_out, atol=1e-2, rtol=1e-2) if residual is not None: opcheck( torch.ops._C.fused_add_rms_norm, (x, residual, layer.weight.data, layer.variance_epsilon), ) else: opcheck( torch.ops._C.rms_norm, (out, x, layer.weight.data, layer.variance_epsilon) ) @pytest.mark.parametrize("num_tokens", NUM_TOKENS) @pytest.mark.parametrize("hidden_size", HIDDEN_SIZES) @pytest.mark.parametrize("add_residual", ADD_RESIDUAL) @pytest.mark.parametrize("dtype", DTYPES) @pytest.mark.parametrize("quant_scale", [0.01, 1.0, 10.0]) @pytest.mark.parametrize("seed", SEEDS) @pytest.mark.parametrize("device", CUDA_DEVICES) @pytest.mark.parametrize("strided_input", [False, True]) def test_fused_rms_norm_quant( num_tokens: int, hidden_size: int, add_residual: bool, dtype: torch.dtype, quant_scale: float, seed: int, device: str, strided_input: bool, ) -> None: current_platform.seed_everything(seed) torch.set_default_device(device) weight = torch.empty(hidden_size, dtype=dtype).normal_(mean=1.0, std=0.1) scale = 1 / (2 * hidden_size) last_dim = 2 * hidden_size if strided_input else hidden_size x_base = torch.randn(num_tokens, last_dim, dtype=dtype) x = x_base[..., :hidden_size] assert x.is_contiguous() != strided_input x *= scale if add_residual: residual = torch.randn_like(x) * scale residual_fused = residual.clone() else: residual = residual_fused = None out_norm = torch.empty_like(x) out_quant = torch.empty_like(x, dtype=FP8_DTYPE) out_quant_fused = torch.empty_like(out_quant) quant_scale_t = torch.tensor(quant_scale, dtype=torch.float32) if add_residual: torch.ops._C.fused_add_rms_norm_static_fp8_quant( out_quant_fused, x, residual_fused, weight, quant_scale_t, 1e-6 ) # Unfused kernel is in-place so it goes second # Also use a separate clone of x to avoid modifying the input x_unfused_base = x_base.clone() 
x_unfused = x_unfused_base[..., :hidden_size] assert x_unfused.is_contiguous() != strided_input torch.ops._C.fused_add_rms_norm(x_unfused, residual, weight, 1e-6) torch.ops._C.static_scaled_fp8_quant( out_quant, x_unfused.contiguous(), quant_scale_t ) torch.cuda.synchronize() torch.testing.assert_close(residual_fused, residual, atol=1e-2, rtol=1e-2) opcheck( torch.ops._C.fused_add_rms_norm_static_fp8_quant, (out_quant_fused, x, residual_fused, weight, quant_scale_t, 1e-6), ) else: torch.ops._C.rms_norm_static_fp8_quant( out_quant_fused, x, weight, quant_scale_t, 1e-6 ) torch.ops._C.rms_norm(out_norm, x, weight, 1e-6) torch.ops._C.static_scaled_fp8_quant(out_quant, out_norm, quant_scale_t) opcheck( torch.ops._C.rms_norm_static_fp8_quant, (out_quant_fused, x, weight, quant_scale_t, 1e-6), ) torch.testing.assert_close( out_quant.to(dtype=torch.float32), out_quant_fused.to(dtype=torch.float32), atol=1e-3, rtol=1e-3, )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/core/test_fused_quant_layernorm.py
tests/kernels/core/test_fused_quant_layernorm.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import pytest import torch import vllm._custom_ops as ops from tests.kernels.utils import opcheck from vllm.model_executor.layers.layernorm import RMSNorm from vllm.model_executor.layers.quantization.utils.fp8_utils import ( per_token_group_quant_fp8, ) from vllm.model_executor.layers.quantization.utils.int8_utils import ( per_token_group_quant_int8, ) DTYPES = [torch.bfloat16, torch.float] QUANT_DTYPES = [torch.int8, torch.float8_e4m3fn] VEC_HIDDEN_SIZES = [1024, 1025, 1027, 1029] # Avoid combinatorial explosion with full Cartesian product NUM_TOKENS_HIDDEN_SIZES = [ *[(1, i) for i in [1, 64, *VEC_HIDDEN_SIZES, 5120, 5137]], *[(2048, i) for i in [1, 64, *VEC_HIDDEN_SIZES, 5137]], *[(4096, i) for i in [1, 64, 5137]], ] ADD_RESIDUAL = [False, True] SCALE_UBS = [True, False] GROUP_SIZES = [None, [1, 64], [1, 128]] SEEDS = [0] CUDA_DEVICES = [f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2)] EPS = 1e-6 ## Helpers def as_float32_tensor(x: float | torch.Tensor) -> torch.Tensor: return torch.as_tensor(x, dtype=torch.float32, device="cuda") def ref_rms_norm( rms_norm_layer: RMSNorm, x: torch.Tensor, residual: torch.Tensor | None ) -> tuple[torch.Tensor, torch.Tensor | None]: if residual is not None: residual = residual.clone() out, residual = rms_norm_layer.forward_native(x, residual) else: out = rms_norm_layer.forward_native(x) return out, residual def ref_dynamic_per_token_or_block_quant( rms_norm_layer: RMSNorm, x: torch.Tensor, quant_dtype: torch.dtype, residual: torch.Tensor | None, scale_ub: torch.Tensor | None, group_size: list[int] | None, ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor | None]: if scale_ub is not None: assert quant_dtype == torch.float8_e4m3fn # Norm torch_out, residual = ref_rms_norm(rms_norm_layer, x, residual) # Quant if group_size is not None: if quant_dtype == torch.float8_e4m3fn: torch_out, scales = 
per_token_group_quant_fp8( torch_out, group_size=group_size[1], use_ue8m0=False ) else: assert quant_dtype == torch.int8 torch_out, scales = per_token_group_quant_int8( torch_out, group_size=group_size[1] ) else: if quant_dtype == torch.float8_e4m3fn: torch_out, scales = ops.scaled_fp8_quant( torch_out, scale_ub=scale_ub, use_per_token_if_dynamic=True ) else: assert quant_dtype == torch.int8 torch_out, scales, _ = ops.scaled_int8_quant(torch_out) return torch_out, scales, residual def ref_impl( rms_norm_layer: RMSNorm, x: torch.Tensor, quant_dtype: torch.dtype, residual: torch.Tensor | None, scale_ub: torch.Tensor | None, group_size: list[int] | None, ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor | None]: return ref_dynamic_per_token_or_block_quant( rms_norm_layer, x, quant_dtype, residual, scale_ub, group_size ) def ops_dynamic_per_token_or_block_quant( weight: torch.Tensor, x: torch.Tensor, quant_dtype: torch.dtype, residual: torch.Tensor | None, scale_ub: torch.Tensor | None, group_size: list[int] | None, ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor | None]: if residual is not None: residual = residual.clone() if group_size is not None: out, scales = ops.rms_norm_per_block_quant( x, weight, EPS, quant_dtype, group_size, scale_ub, residual, True ) scales = scales.contiguous() else: out, scales = ops.rms_norm_dynamic_per_token_quant( x, weight, EPS, quant_dtype, scale_ub, residual ) return out, scales, residual def ops_impl( weight: torch.Tensor, x: torch.Tensor, quant_dtype: torch.dtype, residual: torch.Tensor | None, scale_ub: torch.Tensor | None, group_size: list[int] | None, ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor | None]: return ops_dynamic_per_token_or_block_quant( weight, x, quant_dtype, residual, scale_ub, group_size ) @pytest.mark.parametrize("num_tokens, hidden_size", NUM_TOKENS_HIDDEN_SIZES) @pytest.mark.parametrize("add_residual", ADD_RESIDUAL) @pytest.mark.parametrize("has_scale_ub", SCALE_UBS) 
@pytest.mark.parametrize("dtype", DTYPES) @pytest.mark.parametrize("quant_dtype", QUANT_DTYPES) @pytest.mark.parametrize("group_size", GROUP_SIZES) @pytest.mark.parametrize("seed", SEEDS) @pytest.mark.parametrize("device", CUDA_DEVICES) @torch.inference_mode() def test_rms_norm( num_tokens: int, hidden_size: int, add_residual: bool, has_scale_ub: bool, dtype: torch.dtype, quant_dtype: torch.dtype, group_size: list[int] | None, seed: int, device: str, ) -> None: torch.random.manual_seed(seed) if torch.cuda.is_available(): torch.cuda.manual_seed(seed) torch.set_default_device(device) if group_size is not None and hidden_size % group_size[1] != 0: # skip return if group_size is not None and has_scale_ub: # blockwise baseline doesn't support scale_ub return if has_scale_ub and quant_dtype != torch.float8_e4m3fn: # skip return layer = RMSNorm(hidden_size, EPS).to(dtype=dtype) # Make weights layer.weight.data.normal_(mean=1.0, std=0.1) # Make inputs scale = 1 / (hidden_size) x = torch.randn(num_tokens, hidden_size, dtype=dtype) * scale residual = torch.randn_like(x) * scale if add_residual else None if has_scale_ub: rms_x, _ = ref_rms_norm(layer, x, residual) scale_ub = torch.mean(rms_x).to(dtype=torch.float32, device="cuda") else: scale_ub = None ref_out, ref_scales, ref_residual = ref_impl( layer, x, quant_dtype, residual, scale_ub, group_size ) ops_out, ops_scales, ops_residual = ops_impl( layer.weight, x, quant_dtype, residual, scale_ub, group_size ) assert ref_out.dtype == quant_dtype assert ops_out.dtype == quant_dtype if quant_dtype == torch.int8: assert torch.allclose(ref_scales, ops_scales, atol=1e-6) # big atol to account for round-off errors. 
assert torch.allclose(ref_out, ops_out, atol=1) else: assert torch.allclose(ref_scales, ops_scales) a = ref_out.to(dtype=torch.float32) b = ops_out.to(dtype=torch.float32) ok = torch.allclose(a, b, atol=1e-6) if not ok: # fallback: compare dequantized values with relaxed tolerance if group_size is None: a_deq = a * ref_scales.view(-1, 1) b_deq = b * ops_scales.view(-1, 1) else: a_deq = a * ref_scales.repeat_interleave(group_size[1], dim=1) b_deq = b * ops_scales.repeat_interleave(group_size[1], dim=1) # NOTE: It is possible that some future test cases trigger this # max diff due to precision issues. If such an error is # encountered, it's recommended to inspect the differences between # all corresponding elements from each tensor (e.g. by looping over # them) and checking how many the max diff error shows up on (just # a few bad elements should still be considered acceptable). ok = torch.allclose(a_deq, b_deq, rtol=5e-2, atol=5e-2) assert ok if add_residual: assert torch.allclose(ref_residual, ops_residual) output = torch.empty_like(x, dtype=quant_dtype) scales = torch.empty( (x.numel() // x.shape[-1], 1), device=x.device, dtype=torch.float32 ) opcheck( torch.ops._C.rms_norm_dynamic_per_token_quant, (output, x, layer.weight, scales, 1e-5, scale_ub, residual), )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/core/test_activation.py
tests/kernels/core/test_activation.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import random import pytest import torch from tests.kernels.allclose_default import get_default_atol, get_default_rtol from tests.kernels.utils import opcheck from vllm.model_executor.layers.activation import ( FastGELU, FatreluAndMul, GeluAndMul, MulAndSilu, NewGELU, QuickGELU, SiluAndMul, SwigluOAIAndMul, ) from vllm.platforms import current_platform DTYPES = [torch.half, torch.bfloat16, torch.float] NUM_TOKENS = [7, 83, 2048] # Arbitrary values for testing D = [512, 13824] # Arbitrary values for testing SEEDS = [0] CUDA_DEVICES = [f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2)] @pytest.mark.parametrize( "activation", [ "silu_and_mul", "mul_and_silu", "gelu", "gelu_tanh", "fatrelu", "swigluoai_and_mul", ], ) @pytest.mark.parametrize("num_tokens", NUM_TOKENS) @pytest.mark.parametrize("d", D) @pytest.mark.parametrize("dtype", DTYPES) @pytest.mark.parametrize("seed", SEEDS) @pytest.mark.parametrize("device", CUDA_DEVICES) @torch.inference_mode() def test_act_and_mul( activation: str, num_tokens: int, d: int, dtype: torch.dtype, seed: int, device: str, ) -> None: current_platform.seed_everything(seed) torch.set_default_device(device) x = torch.randn(num_tokens, 2 * d, dtype=dtype) if activation == "silu_and_mul": layer = SiluAndMul() fn = torch.ops._C.silu_and_mul if activation == "mul_and_silu": layer = MulAndSilu() fn = torch.ops._C.mul_and_silu elif activation == "gelu": layer = GeluAndMul(approximate="none") fn = torch.ops._C.gelu_and_mul elif activation == "gelu_tanh": layer = GeluAndMul(approximate="tanh") fn = torch.ops._C.gelu_tanh_and_mul elif activation == "fatrelu": threshold = random.uniform(0, 1) layer = FatreluAndMul(threshold) fn = torch.ops._C.fatrelu_and_mul elif activation == "swigluoai_and_mul": layer = SwigluOAIAndMul() fn = torch.ops._C.swigluoai_and_mul out = layer(x) ref_out = layer.forward_native(x) if activation 
== "swigluoai_and_mul": rtol = { # For fp16, change the relative tolerance from 1e-3 to 2e-3 torch.float16: 2e-3, torch.bfloat16: 2e-2, torch.float: 1.3e-6, } def _get_rtol(output) -> float: return rtol[output.dtype] torch.testing.assert_close( out, ref_out, atol=get_default_atol(out), rtol=_get_rtol(out) ) else: # The SiluAndMul, MulAndSilu, GELU and FatReLU implementations are # equivalent to the native PyTorch implementations, so we can do exact # comparison. torch.testing.assert_close(out, ref_out, atol=0.0, rtol=0.0) d = x.shape[-1] // 2 output_shape = x.shape[:-1] + (d,) out = torch.empty(output_shape, dtype=x.dtype, device=x.device) if activation == "fatrelu": opcheck(fn, (out, x, threshold)) elif activation == "swigluoai_and_mul": opcheck(fn, (out, x, layer.alpha, layer.limit)) else: opcheck(fn, (out, x)) @pytest.mark.parametrize( "activation", [ (FastGELU, torch.ops._C.gelu_fast), (NewGELU, torch.ops._C.gelu_new), (QuickGELU, torch.ops._C.gelu_quick), ], ) @pytest.mark.parametrize("num_tokens", NUM_TOKENS) @pytest.mark.parametrize("d", D) @pytest.mark.parametrize("dtype", DTYPES) @pytest.mark.parametrize("seed", SEEDS) @pytest.mark.parametrize("device", CUDA_DEVICES) @torch.inference_mode() def test_activation( activation: type[torch.nn.Module], num_tokens: int, d: int, dtype: torch.dtype, seed: int, device: str, ) -> None: current_platform.seed_everything(seed) torch.set_default_device(device) x = torch.randn(num_tokens, d, dtype=dtype) layer = activation[0]() fn = activation[1] out = layer(x) ref_out = layer.forward_native(x) torch.testing.assert_close( out, ref_out, atol=get_default_atol(out), rtol=get_default_rtol(out) ) out = torch.empty_like(x) opcheck(fn, (out, x))
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/core/test_uva.py
tests/kernels/core/test_uva.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import pytest import torch from vllm.utils.platform_utils import is_uva_available from vllm.utils.torch_utils import get_cuda_view_from_cpu_tensor CUDA_DEVICES = [f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2)] @pytest.mark.skipif(not is_uva_available(), reason="UVA is not available.") @pytest.mark.parametrize("device", CUDA_DEVICES) def test_cpu_write(device): torch.set_default_device(device) cpu_tensor = torch.zeros(10, 10, device="cpu", pin_memory=True, dtype=torch.int32) cuda_view = get_cuda_view_from_cpu_tensor(cpu_tensor) assert cuda_view.device.type == "cuda" assert cuda_view[0, 0] == 0 assert cuda_view[2, 3] == 0 assert cuda_view[4, 5] == 0 cpu_tensor[0, 0] = 1 cpu_tensor[2, 3] = 2 cpu_tensor[4, 5] = -1 cuda_view.mul_(2) assert cuda_view[0, 0] == 2 assert cuda_view[2, 3] == 4 assert cuda_view[4, 5] == -2 @pytest.mark.skipif(not is_uva_available(), reason="UVA is not available.") @pytest.mark.parametrize("device", CUDA_DEVICES) def test_gpu_write(device): torch.set_default_device(device) cpu_tensor = torch.zeros(10, 10, device="cpu", pin_memory=True, dtype=torch.int32) cuda_view = get_cuda_view_from_cpu_tensor(cpu_tensor) assert cuda_view.device.type == "cuda" assert cuda_view[0, 0] == 0 assert cuda_view[2, 3] == 0 assert cuda_view[4, 5] == 0 cuda_view[0, 0] = 1 cuda_view[2, 3] = 2 cuda_view[4, 5] = -1 cuda_view.mul_(2) assert cpu_tensor[0, 0] == 2 assert cpu_tensor[2, 3] == 4 assert cpu_tensor[4, 5] == -2
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/core/test_rotary_embedding.py
tests/kernels/core/test_rotary_embedding.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ Tests for miscellaneous utilities """ import pytest import torch from tests.kernels.utils import opcheck from vllm.model_executor.layers.rotary_embedding import RotaryEmbedding def rotary_embedding_opcheck( rot, positions: torch.Tensor, query: torch.Tensor, key: torch.Tensor | None = None, ): cos_sin_cache = rot.cos_sin_cache.to(query.device, dtype=query.dtype) # ops.rotary_embedding() is a in-place operation # that updates the query and key tensors. opcheck( torch.ops._C.rotary_embedding, (positions, query, key, rot.head_size, cos_sin_cache, rot.is_neox_style), ) @pytest.mark.parametrize("device", ["cuda"]) @pytest.mark.parametrize("max_position", [11, 4096, 32768]) @pytest.mark.parametrize("is_neox_style", [True, False]) @pytest.mark.parametrize("rotary_dim", [32]) @pytest.mark.parametrize("head_size", [32, 108]) @pytest.mark.parametrize("seq_len", [11, 1024]) @pytest.mark.parametrize("use_key", [True, False]) @pytest.mark.parametrize("head_stride_is_contiguous", [True, False]) def test_rotary_embedding_opcheck( dist_init, device, max_position, is_neox_style, rotary_dim, head_size, seq_len, use_key, head_stride_is_contiguous, ): batch_size = 1 base = 10000 num_heads = 7 rot = RotaryEmbedding( head_size, rotary_dim, max_position, base, is_neox_style, torch.float32 ) positions = torch.randint(0, max_position, (batch_size, seq_len), device=device) head_stride = head_size + (64 if head_stride_is_contiguous else 0) query = torch.randn( batch_size, seq_len, num_heads, head_stride, dtype=torch.float32, device=device ) key = torch.randn_like(query) if use_key else None query = query[..., :head_size] key = key[..., :head_size] if use_key else None rotary_embedding_opcheck(rot, positions, query, key) # if we have a contiguous head stride, test the alternate # [..., num_heads * head_dim] shape/layout if head_stride_is_contiguous: rotary_embedding_opcheck( rot, 
positions, query.flatten(start_dim=-2), key.flatten(start_dim=-2) if use_key else None, )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/core/test_fused_qk_norm_rope.py
tests/kernels/core/test_fused_qk_norm_rope.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import pytest import torch from tests.kernels.utils import opcheck from vllm.model_executor.layers.layernorm import RMSNorm from vllm.model_executor.layers.rotary_embedding import RotaryEmbedding from vllm.platforms import current_platform DTYPES = [torch.bfloat16, torch.float16] IS_NEOX = [True, False] EPS_VALUES = [1e-5, 1e-6] SEEDS = [13] PARTIAL_ROPE = [True, False] CUDA_DEVICES = ["cuda:0"] def _apply_qk_norm_rope( qkv: torch.Tensor, positions: torch.Tensor, q_norm: RMSNorm, k_norm: RMSNorm, rope: RotaryEmbedding, num_heads_q: int, num_heads_kv: int, head_dim: int, ) -> torch.Tensor: q_size = num_heads_q * head_dim kv_size = num_heads_kv * head_dim q, k, v = qkv.split([q_size, kv_size, kv_size], dim=-1) q_by_head = q.view(*q.shape[:-1], q.shape[-1] // head_dim, head_dim) q_by_head = q_norm.forward_native(q_by_head) q = q_by_head.view(q.shape) k_by_head = k.view(*k.shape[:-1], k.shape[-1] // head_dim, head_dim) k_by_head = k_norm.forward_native(k_by_head) k = k_by_head.view(k.shape) q, k = rope.forward_native(positions, q, k) return torch.cat([q, k, v], dim=-1) @pytest.mark.skipif( not current_platform.is_cuda_alike(), reason="fused_qk_norm_rope custom op requires cuda and rocm platform", ) @pytest.mark.parametrize("device", CUDA_DEVICES) @pytest.mark.parametrize("dtype", DTYPES) @pytest.mark.parametrize("is_neox", IS_NEOX) @pytest.mark.parametrize("eps", EPS_VALUES) @pytest.mark.parametrize("seed", SEEDS) @pytest.mark.parametrize("rotary_ratio", [1.0, 0.5, 0.25]) @torch.inference_mode() def test_fused_qk_norm_rope_matches_reference( device: str, dtype: torch.dtype, is_neox: bool, eps: float, seed: int, rotary_ratio: float, ): torch.set_default_device(device) current_platform.seed_everything(seed) num_heads, num_kv_heads, head_dim = 16, 4, 128 num_tokens = 4 total_dim = (num_heads + 2 * num_kv_heads) * head_dim qkv_base = torch.randn(num_tokens, total_dim, 
dtype=dtype, device=device) qkv_fused = qkv_base.clone() positions = torch.arange(num_tokens, dtype=torch.long, device=device) q_norm = RMSNorm(head_dim, eps=eps).to(device=device, dtype=dtype) k_norm = RMSNorm(head_dim, eps=eps).to(device=device, dtype=dtype) q_norm.weight.data.normal_(mean=1.0, std=0.1) k_norm.weight.data.normal_(mean=1.0, std=0.1) q_weight = q_norm.weight.data k_weight = k_norm.weight.data rotary_dim = int(head_dim * rotary_ratio) rope = RotaryEmbedding( head_size=head_dim, rotary_dim=rotary_dim, max_position_embeddings=4096, base=10000.0, is_neox_style=is_neox, dtype=dtype, ).to(device) ref_result = _apply_qk_norm_rope( qkv=qkv_base, positions=positions, q_norm=q_norm, k_norm=k_norm, rope=rope, num_heads_q=num_heads, num_heads_kv=num_kv_heads, head_dim=head_dim, ) opcheck( torch.ops._C.fused_qk_norm_rope, ( qkv_fused.clone(), num_heads, num_kv_heads, num_kv_heads, head_dim, eps, q_weight, k_weight, rope.cos_sin_cache, is_neox, positions.view(-1), ), ) torch.ops._C.fused_qk_norm_rope( qkv_fused, num_heads, num_kv_heads, num_kv_heads, head_dim, eps, q_weight, k_weight, rope.cos_sin_cache, is_neox, positions.view(-1), ) if dtype == torch.float16: ATOL, RTOL = (2e-3, 2e-3) else: ATOL, RTOL = (1e-2, 1e-2) torch.testing.assert_close( qkv_fused, ref_result, atol=ATOL, rtol=RTOL, )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/core/test_opcheck.py
tests/kernels/core/test_opcheck.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ Tests for miscellaneous utilities """ import torch from tests.kernels.utils import opcheck def test_convert_fp8_opcheck(): data = torch.randn((256, 256), dtype=torch.float32, device="cuda") result = torch.empty_like(data, dtype=torch.float8_e4m3fn) opcheck(torch.ops._C_cache_ops.convert_fp8, (result, data, 1.0, "fp8")) # TODO: Add this back, currently fails with # csrc/cuda_utils_kernels.cu:15 'invalid argument' # @pytest.mark.skipif(not current_platform.is_cuda(), # reason="Only supported for CUDA") # def test_cuda_utils_opcheck(): # opcheck(torch.ops._C_cuda_utils.get_device_attribute, (0, 0)) # opcheck( # torch.ops._C_cuda_utils. # get_max_shared_memory_per_block_device_attribute, (0, ))
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/core/test_permute_cols.py
tests/kernels/core/test_permute_cols.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import pytest import torch from tests.kernels.utils import opcheck from vllm._custom_ops import permute_cols @pytest.mark.parametrize("shape", [(1, 512), (544, 4096), (67, 8192)]) @pytest.mark.parametrize("dtype", [torch.bfloat16]) def test_permute_cols(shape, dtype): x = torch.randn(shape, dtype=dtype).cuda() perm = torch.randperm(x.shape[1]).to(torch.int).cuda() opcheck(torch.ops._C.permute_cols, (x, perm)) y = permute_cols(x, perm) torch.testing.assert_close(y, x[:, perm])
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/moe/test_deepgemm.py
tests/kernels/moe/test_deepgemm.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ Unit-test DeepGEMM FP8 kernels (no DeepEP). Compare DeepGEMM path against the Triton fallback inside vLLM's fused_experts. """ import importlib import math import pytest import torch from vllm.model_executor.layers.fused_moe.config import fp8_w8a8_moe_quant_config # vLLM fused-expert reference (Triton fallback + DeepGEMM option) from vllm.model_executor.layers.fused_moe.fused_moe import fused_experts from vllm.model_executor.layers.quantization.utils.fp8_utils import ( per_token_group_quant_fp8, ) from vllm.utils.deep_gemm import ( calc_diff, is_deep_gemm_supported, per_block_cast_to_fp8, ) BLOCK_SIZE = [128, 128] def make_block_quant_fp8_weights( e: int, n: int, k: int, block_size: list[int], ): """ Generate (w1, w2) expert weights and their per-block scale tensors in FP8 block-quantized format. w1 shape: (E, 2N, K) w2 shape: (E, K, N) """ dtype = torch.bfloat16 fp8_max, fp8_min = ( torch.finfo(torch.float8_e4m3fn).max, torch.finfo(torch.float8_e4m3fn).min, ) # bf16 reference weights w1_bf16 = torch.randn(e, 2 * n, k, device="cuda", dtype=dtype) / 10 w2_bf16 = torch.randn(e, k, n, device="cuda", dtype=dtype) / 10 w1_bf16.clamp_(fp8_min, fp8_max) w2_bf16.clamp_(fp8_min, fp8_max) block_n, block_k = block_size n_tiles_w1 = math.ceil((2 * n) / block_n) k_tiles_w1 = math.ceil(k / block_k) n_tiles_w2 = math.ceil(k / block_n) k_tiles_w2 = math.ceil(n / block_k) w1 = torch.empty_like(w1_bf16, dtype=torch.float8_e4m3fn) w2 = torch.empty_like(w2_bf16, dtype=torch.float8_e4m3fn) w1_s = torch.empty(e, n_tiles_w1, k_tiles_w1, device="cuda", dtype=torch.float32) w2_s = torch.empty(e, n_tiles_w2, k_tiles_w2, device="cuda", dtype=torch.float32) for i in range(e): w1[i], w1_s[i] = per_block_cast_to_fp8( w1_bf16[i], block_size=block_size, use_ue8m0=True ) w2[i], w2_s[i] = per_block_cast_to_fp8( w2_bf16[i], block_size=block_size, use_ue8m0=True ) return w1, w2, w1_s, w2_s 
def run_single_case(m, n, k, topk, num_experts, block_size): """ Run one (M,N,K) configuration on a single GPU and assert DeepGEMM == Triton baseline within tolerance. """ tokens_bf16 = ( torch.randn(m, k, device="cuda", dtype=torch.bfloat16) .clamp_min_(-1) .clamp_max_(1) ) _, a1_scale = per_token_group_quant_fp8(tokens_bf16, block_size[1]) # expert weight tensors w1, w2, w1_s, w2_s = make_block_quant_fp8_weights(num_experts, n, k, block_size) router_logits = torch.randn(m, num_experts, device="cuda", dtype=torch.float32) topk_weights, topk_ids = torch.topk(router_logits, k=topk, dim=-1) topk_weights = torch.nn.functional.softmax(topk_weights, dim=-1) quant_config = fp8_w8a8_moe_quant_config( w1_scale=w1_s, w2_scale=w2_s, a1_scale=a1_scale, block_shape=block_size, ) # triton reference out_triton = fused_experts( hidden_states=tokens_bf16, w1=w1, w2=w2, topk_weights=topk_weights, topk_ids=topk_ids, inplace=False, quant_config=quant_config, allow_deep_gemm=False, ) # DeepGemm out_deepgemm = fused_experts( hidden_states=tokens_bf16, w1=w1, w2=w2, topk_weights=topk_weights, topk_ids=topk_ids, inplace=False, quant_config=quant_config, allow_deep_gemm=True, ) diff = calc_diff(out_deepgemm, out_triton) assert diff < 0.001, f"Diff exceeded 1%: {diff}" # Note: N <= 512 will disable the deepgemm path due to performance issues. 
MNKs = [ (1024, 768, 128), (2048, 768, 512), (512, 1024, 1024), (4096, 4096, 1024), ] TOPKS = [2, 6] NUM_EXPERTS = [32] @pytest.mark.parametrize(("m", "n", "k"), MNKs) @pytest.mark.parametrize("topk", TOPKS) @pytest.mark.parametrize("num_experts", NUM_EXPERTS) @pytest.mark.skipif(not is_deep_gemm_supported(), reason="Requires deep_gemm kernels") def test_deepgemm_vs_triton(m, n, k, topk, num_experts, monkeypatch, workspace_init): with monkeypatch.context() as mp: mp.setenv("VLLM_USE_DEEP_GEMM", "1") _fused_moe_mod = importlib.import_module( "vllm.model_executor.layers.fused_moe.fused_moe" ) call_counter = {"cnt": 0} orig_fn = _fused_moe_mod.deep_gemm_moe_fp8 def _spy_deep_gemm_moe_fp8(*args, **kwargs): call_counter["cnt"] += 1 return orig_fn(*args, **kwargs) monkeypatch.setattr(_fused_moe_mod, "deep_gemm_moe_fp8", _spy_deep_gemm_moe_fp8) if topk > num_experts: pytest.skip(f"topk={topk} > num_experts={num_experts}") run_single_case( m=m, n=n, k=k, topk=topk, num_experts=num_experts, block_size=BLOCK_SIZE, ) # ensure that the DeepGEMM path was indeed taken. assert call_counter["cnt"] == 1, ( f"DeepGEMM path was not executed during the test. " f"Call counter: {call_counter['cnt']}" )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/moe/test_moe_permute_unpermute.py
tests/kernels/moe/test_moe_permute_unpermute.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """Tests for the MOE permute/unpermute kernel Run `pytest tests/kernels/test_moe_permute_unpermute.py`. """ import numpy as np import pytest import torch from vllm.model_executor.layers.fused_moe.fused_moe import fused_topk from vllm.model_executor.layers.fused_moe.layer import determine_expert_map from vllm.model_executor.layers.fused_moe.moe_permute_unpermute import ( moe_permute, moe_permute_unpermute_supported, moe_unpermute, ) from vllm.platforms import current_platform NUM_EXPERTS = [16, 64, 256] TOP_KS = [2, 6, 8] EP_SIZE = [1, 4, 16] current_platform.seed_everything(0) if current_platform.is_rocm(): pytest.skip( "moe_permute_unpermute_supported is not defined for ROCm", allow_module_level=True, ) def torch_permute( hidden_states: torch.Tensor, topk_ids: torch.Tensor, # token_expert_indices: torch.Tensor, topk: int, n_expert: int, n_local_expert: int, start_expert: int, expert_map: torch.Tensor | None = None, align_block_size: int | None = None, fill_invalid_expert: int = -1, ) -> list[torch.Tensor]: n_token, n_hidden = hidden_states.shape[0], hidden_states.shape[1] if expert_map is not None: is_local_expert = expert_map[topk_ids] != -1 not_local_expert = expert_map[topk_ids] == -1 topk_ids = is_local_expert * (topk_ids - start_expert) + not_local_expert * ( topk_ids + n_expert ) token_expert_indices = torch.arange( 0, n_token * topk, dtype=torch.int32, device=hidden_states.device ).reshape((n_token, topk)) sorted_topk_ids, sorted_indices = torch.sort(topk_ids.flatten(), stable=True) dst_row_id2src_row_id_map = token_expert_indices.flatten()[sorted_indices] expert_first_token_offset = torch.zeros( n_local_expert + 1, dtype=torch.int64, device="cuda" ) idx = 0 for i in range(0, n_local_expert): cnt = 0 while idx < sorted_topk_ids.numel() and sorted_topk_ids[idx] == i: cnt += 1 idx += 1 expert_first_token_offset[i + 1] = expert_first_token_offset[i] + cnt 
_, src2dst_idx = torch.sort(dst_row_id2src_row_id_map) valid_row_idx = [] if align_block_size is None: permuted_hidden_states = hidden_states[dst_row_id2src_row_id_map // topk, ...] permuted_row_size = permuted_hidden_states.shape[0] m_indices = torch.empty( permuted_row_size, device="cuda", dtype=torch.int32 ).fill_(fill_invalid_expert) for i in range(1, n_local_expert + 1): first_token_offset = expert_first_token_offset[i - 1] last_token_offset = expert_first_token_offset[i] m_indices[first_token_offset:last_token_offset] = i - 1 src_row_id2dst_row_id_map = torch.arange( 0, n_token * topk, device="cuda", dtype=torch.int32 )[src2dst_idx].reshape((n_token, topk)) valid_row_idx += [i for i in range(expert_first_token_offset[-1])] dst_row_id2src_row_id_map[expert_first_token_offset[-1] :] = n_token * topk return [ permuted_hidden_states, expert_first_token_offset, src_row_id2dst_row_id_map, dst_row_id2src_row_id_map, m_indices, valid_row_idx, ] else: permuted_row_size = ( (topk * n_token + n_expert * (align_block_size - 1) + align_block_size - 1) // align_block_size * align_block_size ) permuted_idx = torch.full( (permuted_row_size,), n_token * topk, dtype=torch.int32, device=hidden_states.device, ) permuted_hidden_states = torch.empty( (permuted_row_size, n_hidden), device="cuda", dtype=hidden_states.dtype ) align_src_row_id2dst_row_id = torch.empty( n_token * topk, device="cuda", dtype=torch.int32 ) align_expert_first_token_offset = torch.zeros_like(expert_first_token_offset) m_indices = torch.empty( permuted_row_size, device="cuda", dtype=torch.int32 ).fill_(fill_invalid_expert) # get align_permuted_hidden_states, # valid row_idx and align_expert_first_token_offset for i in range(1, n_local_expert + 1): first_token_offset = expert_first_token_offset[i - 1] last_token_offset = expert_first_token_offset[i] n_token_in_expert = last_token_offset - first_token_offset align_expert_first_token_offset[i] = ( align_expert_first_token_offset[i - 1] + (n_token_in_expert + 
align_block_size - 1) // align_block_size * align_block_size ) align_first_token_offset = align_expert_first_token_offset[i - 1] align_last_token_offset = align_expert_first_token_offset[i] dst_row_id2src_row_id_in_expert = dst_row_id2src_row_id_map[ first_token_offset : first_token_offset + n_token_in_expert ] # store token in current expert with align_first_token_offset permuted_hidden_states[ align_first_token_offset : align_first_token_offset + n_token_in_expert, ..., ] = hidden_states[dst_row_id2src_row_id_in_expert // topk, ...] permuted_idx[ align_first_token_offset : align_first_token_offset + n_token_in_expert ] = dst_row_id2src_row_id_in_expert # set current expert m_indices m_indices[align_first_token_offset:align_last_token_offset] = i - 1 valid_row_idx += [ i for i in range( align_first_token_offset, align_first_token_offset + n_token_in_expert, ) ] # get align_src_row_id2dst_row_id for i in range(n_token * topk): eid = sorted_topk_ids[i] if eid >= n_local_expert: # check token not in local expert align_src_row_id2dst_row_id[i] = align_expert_first_token_offset[-1] continue first_token_offset = expert_first_token_offset[eid] align_first_token_offset = align_expert_first_token_offset[eid] token_offset = i - first_token_offset align_src_row_id2dst_row_id[i] = align_first_token_offset + token_offset align_src_row_id2dst_row_id = align_src_row_id2dst_row_id[src2dst_idx].reshape( (n_token, topk) ) return [ permuted_hidden_states, align_expert_first_token_offset, align_src_row_id2dst_row_id, permuted_idx, m_indices, valid_row_idx, ] def torch_unpermute( permuted_hidden_states: torch.Tensor, topk_weights: torch.Tensor, topk_ids: torch.Tensor, token_expert_indices: torch.Tensor, src_row_id2dst_row_id_map: torch.Tensor, valid_row_idx: torch.Tensor, topk: int, n_expert: int, ) -> torch.Tensor: # ignore invalid row n_hidden = permuted_hidden_states.shape[1] mask = torch.zeros(permuted_hidden_states.shape[0], dtype=bool, device="cuda") mask[valid_row_idx] = True 
permuted_hidden_states[~mask] = 0 permuted_hidden_states = permuted_hidden_states[ src_row_id2dst_row_id_map.flatten(), ... ] permuted_hidden_states = permuted_hidden_states.view(-1, topk, n_hidden) output = ( (permuted_hidden_states * topk_weights.unsqueeze(2)) .sum(1) .to(permuted_hidden_states.dtype) ) return output @pytest.mark.parametrize("n_token", [1, 33, 1024, 5000]) @pytest.mark.parametrize("n_hidden", [2048, 7168]) @pytest.mark.parametrize("n_expert", NUM_EXPERTS) @pytest.mark.parametrize("topk", TOP_KS) @pytest.mark.parametrize("dtype", [torch.bfloat16]) @pytest.mark.parametrize("ep_size", EP_SIZE) @pytest.mark.parametrize("align_block_size", [None, 128]) def test_moe_permute_unpermute( n_token: int, n_hidden: int, topk: int, n_expert: int, ep_size: int, dtype: torch.dtype, align_block_size: int | None, ): if not moe_permute_unpermute_supported(): pytest.skip("moe_permute_unpermute is not supported on this platform.") fill_invalid_expert = 0 ep_rank = np.random.randint(0, ep_size) expert_map = None n_local_expert = n_expert if ep_size != 1: n_local_expert, expert_map, _ = determine_expert_map(ep_size, ep_rank, n_expert) expert_map = expert_map.cuda() start_expert = n_local_expert * ep_rank current_platform.seed_everything(0) hidden_states = torch.randn((n_token, n_hidden), device="cuda").to(dtype) gating_output = torch.randn((n_token, n_expert), device="cuda").to(dtype) topk_weights, topk_ids, token_expert_indices = fused_topk( hidden_states, gating_output, topk, False ) ( gold_permuted_hidden_states, gold_expert_first_token_offset, gold_inv_permuted_idx, gold_permuted_idx, gold_m_indices, valid_row_idx, ) = torch_permute( hidden_states, topk_ids, # token_expert_indices, topk, n_expert, n_local_expert, start_expert, expert_map=expert_map, align_block_size=align_block_size, fill_invalid_expert=fill_invalid_expert, ) ( permuted_hidden_states, _, expert_first_token_offset, inv_permuted_idx, m_indices, ) = moe_permute( hidden_states=hidden_states, 
a1q_scale=None, topk_ids=topk_ids, n_expert=n_expert, n_local_expert=n_local_expert, expert_map=expert_map, align_block_size=align_block_size, fill_invalid_expert=fill_invalid_expert, ) # check expert_first_token_offset torch.testing.assert_close( gold_expert_first_token_offset, expert_first_token_offset, atol=0, rtol=0 ) # check src_row_id2dst_row_id_map torch.testing.assert_close( gold_inv_permuted_idx.flatten(), inv_permuted_idx, atol=0, rtol=0 ) # check mindice # current kernel usage assumes deepgemm requires align_block_size # when it's not provided then we don't compute m_indices (for cutlass) if align_block_size is not None: torch.testing.assert_close(gold_m_indices, m_indices, atol=0, rtol=0) # check permuted_hidden_states, only valid token torch.testing.assert_close( gold_permuted_hidden_states[valid_row_idx], permuted_hidden_states[valid_row_idx], atol=0, rtol=0, ) # add a random tensor to simulate group gemm result0 = 0.5 * permuted_hidden_states + torch.randn_like(permuted_hidden_states) result4 = torch.empty_like(hidden_states) moe_unpermute( result4, result0, topk_weights, inv_permuted_idx, expert_first_token_offset ) gold4 = torch_unpermute( result0, topk_weights, topk_ids, token_expert_indices, inv_permuted_idx, valid_row_idx, topk, n_local_expert, ) # check unpermuted hidden torch.testing.assert_close(result4, gold4, atol=2e-2, rtol=0)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/moe/test_pplx_moe.py
tests/kernels/moe/test_pplx_moe.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """Tests for the MOE layers. Run `pytest tests/kernels/test_pplx_moe.py`. """ import copy import itertools import textwrap import traceback from collections.abc import Callable import pytest import torch try: from pplx_kernels import AllToAll from pplx_kernels.nvshmem import ( nvshmem_alloc_empty_unique_id, nvshmem_finalize, nvshmem_get_unique_id, nvshmem_init, ) has_pplx = True except ImportError: has_pplx = False from tests.kernels.moe.modular_kernel_tools.parallel_utils import _set_vllm_config from tests.kernels.moe.utils import ( make_shared_experts, make_test_weights, naive_batched_moe, ) from tests.kernels.quant_utils import dequant from tests.kernels.utils import torch_experts from vllm.config import VllmConfig, set_current_vllm_config from vllm.model_executor.layers.fused_moe import fused_topk, override_config from vllm.model_executor.layers.fused_moe.config import FusedMoEQuantConfig from vllm.model_executor.layers.fused_moe.fused_batched_moe import BatchedTritonExperts from vllm.model_executor.layers.fused_moe.fused_moe import get_default_config from vllm.model_executor.layers.fused_moe.modular_kernel import FusedMoEModularKernel from vllm.model_executor.layers.fused_moe.topk_weight_and_reduce import ( TopKWeightAndReduceDelegate, ) from vllm.platforms import current_platform from vllm.utils.math_utils import round_up from vllm.v1.worker.workspace import init_workspace_manager from ...utils import multi_gpu_test from .parallel_utils import ProcessGroupInfo, parallel_launch requires_pplx = pytest.mark.skipif( not has_pplx, reason="Requires PPLX kernels", ) BATCHED_MOE_MNK_FACTORS = [ (1, 128, 128), (33, 2048, 128), (64, 128, 2048), (222, 128, 128), (222, 2048, 1024), ] PPLX_COMBOS = [ # TODO(bnell): figure out why this fails, seems to be test problem # (1, 128, 128), (2, 128, 512), (3, 1024, 2048), (4, 128, 128), (32, 1024, 512), (45, 512, 2048), (64, 
1024, 512), (222, 2048, 1024), (256, 1408, 2048), ] NUM_EXPERTS = [8, 64] TOP_KS = [1, 2, 6] DTYPES = [torch.float8_e4m3fn, torch.bfloat16] vllm_config = VllmConfig() def torch_prepare( a: torch.Tensor, topk_ids: torch.Tensor, num_experts: int, max_num_tokens: int | None = None, ) -> tuple[torch.Tensor, torch.Tensor]: assert topk_ids.dim() == 2 assert topk_ids.shape[0] == a.shape[0] num_tokens, hidden_dim = a.shape topk = topk_ids.shape[1] tokens_per_expert = torch.bincount(topk_ids.view(-1), minlength=num_experts) assert tokens_per_expert.numel() == num_experts if max_num_tokens is None: max_num_tokens = int(tokens_per_expert.max().item()) b_a = torch.zeros( (num_experts, max_num_tokens, hidden_dim), dtype=a.dtype, device=a.device ) token_counts = torch.zeros(num_experts, dtype=torch.int, device=a.device) for token in range(num_tokens): for j in range(topk): expert_id = topk_ids[token, j] idx = token_counts[expert_id] b_a[expert_id, idx : idx + 1, :] = a[token, :] token_counts[expert_id] = token_counts[expert_id] + 1 return b_a, tokens_per_expert def torch_finalize( b_out: torch.Tensor, topk_weight: torch.Tensor, topk_ids: torch.Tensor ) -> torch.Tensor: num_tokens = topk_ids.shape[0] num_experts = b_out.shape[0] K = b_out.shape[-1] out = torch.zeros((num_tokens, K), dtype=b_out.dtype, device=b_out.device) expert_counts = torch.zeros(num_experts, dtype=torch.int, device=b_out.device) for token in range(num_tokens): expert_ids = topk_ids[token] for i in range(expert_ids.numel()): expert_id = expert_ids[i] idx = expert_counts[expert_id] out[token, :] = ( out[token, :] + b_out[expert_id, idx : idx + 1, :] * topk_weight[token, i] ) expert_counts[expert_id] = expert_counts[expert_id] + 1 return out def torch_batched_moe( a: torch.Tensor, w1: torch.Tensor, w2: torch.Tensor, topk_weight: torch.Tensor, topk_ids: torch.Tensor, ) -> torch.Tensor: num_experts = w1.shape[0] b_a, tokens_per_expert = torch_prepare(a, topk_ids, num_experts) assert b_a.dim() == 3 num_tokens, topk 
= topk_ids.shape _, max_num_tokens, K = b_a.shape assert num_experts == b_a.shape[0] and w2.shape[1] == K out = torch.zeros( (num_experts, max_num_tokens, K), dtype=b_a.dtype, device=b_a.device ) tmp = torch.empty( (max_num_tokens, w1.shape[1] // 2), dtype=b_a.dtype, device=b_a.device ) for expert in range(num_experts): num = tokens_per_expert[expert] if num > 0: torch.ops._C.silu_and_mul( tmp[:num], b_a[expert, :num, :] @ w1[expert].transpose(0, 1) ) out[expert, :num, :] = tmp[:num] @ w2[expert].transpose(0, 1) return torch_finalize(out, topk_weight, topk_ids) @pytest.mark.parametrize("m,n,k", BATCHED_MOE_MNK_FACTORS) @pytest.mark.parametrize("e", NUM_EXPERTS) @pytest.mark.parametrize("topk", TOP_KS) @pytest.mark.parametrize("dtype", [torch.bfloat16]) def test_fused_moe_batched_experts( m: int, n: int, k: int, e: int, topk: int, dtype: torch.dtype, workspace_init, ): current_platform.seed_everything(7) a = torch.randn((m, k), device="cuda", dtype=dtype) / 10 w1 = torch.randn((e, 2 * n, k), device="cuda", dtype=dtype) / 10 w2 = torch.randn((e, k, n), device="cuda", dtype=dtype) / 10 score = torch.randn((m, e), device="cuda", dtype=dtype) with set_current_vllm_config(vllm_config): topk_weight, topk_ids, _ = fused_topk(a, score, topk, False) baseline_output = torch_experts( a, w1, w2, topk_weight, topk_ids ) # only for baseline torch_output = torch_batched_moe(a, w1, w2, topk_weight, topk_ids) batched_output = naive_batched_moe( a, w1, w2, topk_weight, topk_ids ) # pick torch_experts or this torch.testing.assert_close(baseline_output, torch_output, atol=2e-2, rtol=0) torch.testing.assert_close(baseline_output, batched_output, atol=2e-2, rtol=0) def create_pplx_prepare_finalize( num_tokens: int, hidden_dim: int, topk: int, num_experts: int, rank: int, dp_size: int, world_size: int, in_dtype: torch.dtype, quant_dtype: torch.dtype | None, block_shape: list[int] | None, per_act_token_quant: bool, group_name: str | None, ): from 
vllm.model_executor.layers.fused_moe.pplx_prepare_finalize import ( PplxPrepareAndFinalize, pplx_hidden_dim_scale_bytes, ) max_num_tokens = max(rank_chunk(num_tokens, 0, world_size), 1) num_local_experts = rank_chunk(num_experts, 0, world_size) hidden_dim_bytes, scale_bytes = pplx_hidden_dim_scale_bytes( max_num_tokens, hidden_dim, in_dtype, quant_dtype, per_act_token_quant=per_act_token_quant, block_shape=block_shape, ) args = dict( max_num_tokens=max_num_tokens, num_experts=num_experts, experts_per_token=topk, rank=rank, world_size=world_size, dp_size=dp_size, hidden_dim=hidden_dim, hidden_dim_bytes=hidden_dim_bytes, hidden_dim_scale_bytes=scale_bytes, ) if group_name is None: ata = AllToAll.internode(**args) else: args["group_name"] = group_name ata = AllToAll.intranode(**args) prepare_finalize = PplxPrepareAndFinalize( ata, max_num_tokens=max_num_tokens, num_local_experts=num_local_experts, num_dispatchers=world_size // dp_size, ) return prepare_finalize, ata def rank_chunk(num: int, r: int, w: int) -> int: rem = num % w return (num // w) + (1 if r < rem else 0) def chunk_by_rank(t: torch.Tensor, r: int, w: int) -> torch.Tensor: chunk = rank_chunk(t.shape[0], r, w) return t[(r * chunk) : (r + 1) * chunk] def maybe_chunk_by_rank(t: torch.Tensor | None, r: int, w: int) -> torch.Tensor | None: if t is not None: return chunk_by_rank(t, r, w) else: return t def chunk_scales_by_rank(t: torch.Tensor | None, r: int, w: int) -> torch.Tensor | None: if t is not None and t.numel() > 1: chunk = rank_chunk(t.shape[0], r, w) return t[(r * chunk) : (r + 1) * chunk] else: return t def chunk_scales(t: torch.Tensor | None, start: int, end: int) -> torch.Tensor | None: if t is not None and t.numel() > 1: return t[start:end] else: return t def dummy_work(a: torch.Tensor) -> torch.Tensor: return a * 1.1 def pplx_prepare_finalize( pgi: ProcessGroupInfo, dp_size: int, a: torch.Tensor, topk_weight: torch.Tensor, topk_ids: torch.Tensor, num_experts: int, quant_dtype: torch.dtype | 
None, block_shape: list[int] | None, per_act_token_quant: bool, group_name: str | None, ) -> torch.Tensor: assert torch.cuda.current_device() == pgi.local_rank topk = topk_ids.shape[1] num_tokens, hidden_dim = a.shape device = pgi.device rank = pgi.rank world_size = pgi.world_size topk_ids = topk_ids.to(dtype=torch.uint32) prepare_finalize, ata = create_pplx_prepare_finalize( num_tokens, hidden_dim, topk, num_experts, rank, dp_size, world_size, a.dtype, quant_dtype, block_shape, per_act_token_quant, group_name, ) assert a.shape[0] == topk_ids.shape[0] a_chunk = chunk_by_rank(a, rank, world_size).to(device) chunk_topk_weight = chunk_by_rank(topk_weight, rank, world_size).to(device) chunk_topk_ids = chunk_by_rank(topk_ids, rank, world_size).to(device) assert a_chunk.shape[0] == chunk_topk_ids.shape[0] out = torch.full( a_chunk.shape, torch.nan, dtype=a.dtype, device=device, ) if quant_dtype is not None and not per_act_token_quant and block_shape is None: a1_scale = torch.tensor(1.0, device="cuda", dtype=torch.float32) a2_scale = torch.tensor(1.0, device="cuda", dtype=torch.float32) else: a1_scale = None a2_scale = None b_a, b_a_scale, expert_num_tokens, _, _ = prepare_finalize.prepare( a_chunk, chunk_topk_weight, chunk_topk_ids, num_experts, None, False, FusedMoEQuantConfig.make( quant_dtype, per_act_token_quant=per_act_token_quant, per_out_ch_quant=False, block_shape=block_shape, a1_scale=a1_scale, a2_scale=a2_scale, ), ) b_a = dummy_work(dequant(b_a, b_a_scale, block_shape, per_act_token_quant, a.dtype)) prepare_finalize.finalize( out, b_a, chunk_topk_weight, chunk_topk_ids, False, weight_and_reduce_impl=TopKWeightAndReduceDelegate(), ) torch.cuda.synchronize() ata.destroy() num_tokens = a_chunk.shape[0] return out[:num_tokens] def _pplx_prepare_finalize( pgi: ProcessGroupInfo, dp_size: int, a: torch.Tensor, score: torch.Tensor, topk: torch.Tensor, num_experts: int, quant_dtype: torch.dtype | None, block_shape: list[int] | None, per_act_token_quant: bool, 
use_internode: bool, ): try: if use_internode: uid = ( nvshmem_get_unique_id() if pgi.rank == 0 else nvshmem_alloc_empty_unique_id() ) torch.distributed.broadcast(uid, src=0) nvshmem_init(uid, pgi.rank, pgi.world_size) group_name = None else: group_ranks = list(range(pgi.world_size)) cpu_group = torch.distributed.new_group(group_ranks, backend="gloo") group_name = cpu_group.group_name topk_weight, topk_ids, _ = fused_topk(a, score, topk, False) m, k = a.shape a_rep = torch.repeat_interleave(dummy_work(a), topk, dim=0) torch_output = ( a_rep.view(m, topk, k) * topk_weight.view(m, topk, 1).to(a_rep.dtype) ).sum(dim=1) pplx_output = pplx_prepare_finalize( pgi, dp_size, a, topk_weight, topk_ids, num_experts, quant_dtype, block_shape, per_act_token_quant, group_name, ) torch_output = chunk_by_rank(torch_output, pgi.rank, pgi.world_size).to( pgi.device ) torch.testing.assert_close(pplx_output, torch_output, atol=3e-2, rtol=3e-2) finally: if use_internode: nvshmem_finalize() @pytest.mark.parametrize("mnk", PPLX_COMBOS) @pytest.mark.parametrize("e", NUM_EXPERTS) @pytest.mark.parametrize("topk", TOP_KS) @pytest.mark.parametrize("dtype", DTYPES) @pytest.mark.parametrize("world_dp_size", [[2, 1]]) @pytest.mark.parametrize("per_act_token_quant", [False, True]) @pytest.mark.parametrize("block_shape", [None, [128, 128]]) @pytest.mark.parametrize("use_internode", [False]) @pytest.mark.optional @requires_pplx @multi_gpu_test(num_gpus=2) def test_pplx_prepare_finalize_slow( mnk: tuple[int, int, int], e: int, topk: int, dtype: torch.dtype, world_dp_size: tuple[int, int], per_act_token_quant: bool, block_shape: list[int] | None, use_internode: bool, ): if dtype == torch.float8_e4m3fn: use_fp8_w8a8 = True act_dtype = torch.bfloat16 quant_dtype = dtype else: use_fp8_w8a8 = False act_dtype = dtype quant_dtype = None if not use_fp8_w8a8 and (per_act_token_quant or block_shape is not None): pytest.skip("Skip quantization test for non-quantized type") if per_act_token_quant and block_shape 
is not None: pytest.skip("Skip illegal quantization combination") current_platform.seed_everything(7) m, n, k = mnk world_size, dp_size = world_dp_size device = "cuda" a = torch.randn((m, k), device=device, dtype=act_dtype) / 10 score = torch.randn((m, e), device=device, dtype=act_dtype) parallel_launch( world_size, _pplx_prepare_finalize, dp_size, a, score, topk, e, quant_dtype, block_shape, per_act_token_quant, use_internode, ) def pplx_moe( group_name: str | None, rank: int, world_size: int, dp_size: int, a: torch.Tensor, w1: torch.Tensor, w2: torch.Tensor, topk_weight: torch.Tensor, topk_ids: torch.Tensor, w1_scale: torch.Tensor | None = None, w2_scale: torch.Tensor | None = None, a1_scale: torch.Tensor | None = None, a2_scale: torch.Tensor | None = None, quant_dtype: torch.dtype | None = None, per_act_token_quant=False, block_shape: list[int] | None = None, use_compile: bool = False, use_cudagraphs: bool = True, shared_experts: torch.nn.Module | None = None, ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]: num_tokens, hidden_dim = a.shape num_experts = w1.shape[0] topk = topk_ids.shape[1] max_num_tokens = round_up(rank_chunk(a.shape[0], 0, world_size), 16) prepare_finalize, ata = create_pplx_prepare_finalize( num_tokens, hidden_dim, topk, num_experts, rank, dp_size, world_size, a.dtype, quant_dtype, block_shape, per_act_token_quant, group_name, ) topk_ids = topk_ids.to(dtype=torch.uint32) # Note: workers with the same dp_rank must use the exact same inputs. 
a_chunk = chunk_by_rank(a, rank, world_size) chunk_topk_weight = chunk_by_rank(topk_weight, rank, world_size) chunk_topk_ids = chunk_by_rank(topk_ids, rank, world_size) # Chunking weights like this only works for batched format w1_chunk = chunk_by_rank(w1, rank, world_size) w2_chunk = chunk_by_rank(w2, rank, world_size) w1_scale_chunk = maybe_chunk_by_rank(w1_scale, rank, world_size) w2_scale_chunk = maybe_chunk_by_rank(w2_scale, rank, world_size) a1_scale_chunk = chunk_scales_by_rank(a1_scale, rank, world_size) a2_scale_chunk = chunk_scales_by_rank(a2_scale, rank, world_size) quant_config = FusedMoEQuantConfig.make( quant_dtype, block_shape=block_shape, per_act_token_quant=per_act_token_quant, w1_scale=w1_scale_chunk, w2_scale=w2_scale_chunk, a1_scale=a1_scale_chunk, a2_scale=a2_scale_chunk, ) experts = BatchedTritonExperts( max_num_tokens=max_num_tokens, num_dispatchers=prepare_finalize.num_dispatchers(), quant_config=quant_config, ) fused_experts = FusedMoEModularKernel( prepare_finalize, experts, shared_experts, ) # Note: for now use_compile will error out if the problem size is # large enough to trigger chunking. I'm leaving the flag and # setup code in case we are able to revisit this later. 
if use_compile: _fused_experts = torch.compile( fused_experts, backend="inductor", fullgraph=True ) torch._dynamo.mark_dynamic(a_chunk, 0) torch._dynamo.mark_dynamic(chunk_topk_weight, 0) torch._dynamo.mark_dynamic(chunk_topk_ids, 0) else: _fused_experts = fused_experts out = _fused_experts( a_chunk, w1_chunk, w2_chunk, chunk_topk_weight, chunk_topk_ids, global_num_experts=num_experts, ) if use_cudagraphs: if isinstance(out, tuple): out[0].fill_(0) out[1].fill_(0) else: out.fill_(0) stream = torch.cuda.Stream() graph = torch.cuda.CUDAGraph() with torch.cuda.graph(graph, stream=stream): out = _fused_experts( a_chunk, w1_chunk, w2_chunk, chunk_topk_weight, chunk_topk_ids, global_num_experts=num_experts, ) torch.cuda.synchronize() graph.replay() torch.cuda.synchronize() ata.destroy() return out def _pplx_moe( pgi: ProcessGroupInfo, dp_size: int, a: torch.Tensor, w1: torch.Tensor, w2: torch.Tensor, score: torch.Tensor, topk: int, num_experts: int, w1_s: torch.Tensor | None = None, w2_s: torch.Tensor | None = None, quant_dtype: torch.dtype | None = None, per_act_token_quant: bool = False, block_shape: list[int] | None = None, use_internode: bool = False, shared_experts: torch.nn.Module | None = None, ): try: if use_internode: uid = ( nvshmem_get_unique_id() if pgi.rank == 0 else nvshmem_alloc_empty_unique_id() ) torch.distributed.broadcast(uid, src=0) nvshmem_init(uid, pgi.rank, pgi.world_size) group_name = None else: group_ranks = list(range(pgi.world_size)) cpu_group = torch.distributed.new_group(group_ranks, backend="gloo") group_name = cpu_group.group_name m, k = a.shape e, _, n = w2.shape moe_config = get_default_config(m, e, n, k, topk, a.dtype, False) device = torch.device("cuda", pgi.rank) rank = pgi.rank world_size = pgi.world_size a = a.to(device) w1 = w1.to(device) w2 = w2.to(device) w1_s = w1_s.to(device) if w1_s is not None else None w2_s = w2_s.to(device) if w2_s is not None else None if quant_dtype is not None and not per_act_token_quant and block_shape 
is None: a1_scale = torch.tensor(1.0, device="cuda", dtype=torch.float32) a2_scale = torch.tensor(1.0, device="cuda", dtype=torch.float32) else: a1_scale = None a2_scale = None with set_current_vllm_config(vllm_config), override_config(moe_config): topk_weight, topk_ids, _ = fused_topk(a, score, topk, False) shared_output = shared_experts(a) if shared_experts is not None else None torch_output = torch_experts( a, w1, w2, topk_weight, topk_ids, w1_scale=w1_s, w2_scale=w2_s, a1_scale=a1_scale, a2_scale=a2_scale, quant_dtype=quant_dtype, per_act_token_quant=per_act_token_quant, block_shape=block_shape, ) batched_output = naive_batched_moe( a, w1, w2, topk_weight, topk_ids, w1_scale=w1_s, w2_scale=w2_s, a1_scale=a1_scale, a2_scale=a2_scale, quant_dtype=quant_dtype, per_act_token_quant=per_act_token_quant, block_shape=block_shape, ) pplx_outputs = pplx_moe( group_name, rank, world_size, dp_size, a, w1, w2, topk_weight, topk_ids, w1_scale=w1_s, w2_scale=w2_s, a1_scale=a1_scale, a2_scale=a2_scale, quant_dtype=quant_dtype, per_act_token_quant=per_act_token_quant, block_shape=block_shape, shared_experts=shared_experts, ) if shared_experts is None: pplx_shared_output = None pplx_output = pplx_outputs assert isinstance(pplx_output, torch.Tensor) else: pplx_shared_output, pplx_output = pplx_outputs if shared_output is not None: assert pplx_shared_output is not None chunked_shared_output = chunk_by_rank( shared_output, pgi.rank, pgi.world_size ).to(pplx_shared_output.device) else: chunked_shared_output = None chunked_batch_output = chunk_by_rank( batched_output, pgi.rank, pgi.world_size ).to(pplx_output.device) torch.testing.assert_close(batched_output, torch_output, atol=3e-2, rtol=3e-2) torch.testing.assert_close( pplx_output, chunked_batch_output, atol=3e-2, rtol=3e-2 ) if shared_experts is not None: assert chunked_shared_output is not None assert pplx_shared_output is not None torch.testing.assert_close( pplx_shared_output, chunked_shared_output, atol=3e-2, rtol=3e-2 ) 
finally: if use_internode: nvshmem_finalize() @pytest.mark.parametrize("mnk", PPLX_COMBOS) @pytest.mark.parametrize("e", NUM_EXPERTS) @pytest.mark.parametrize("topk", TOP_KS) @pytest.mark.parametrize("dtype", DTYPES) @pytest.mark.parametrize("world_dp_size", [[2, 1]]) @pytest.mark.parametrize("per_act_token_quant", [False, True]) @pytest.mark.parametrize("block_shape", [None, [128, 128]]) @pytest.mark.parametrize("use_internode", [False]) @pytest.mark.optional @requires_pplx @multi_gpu_test(num_gpus=2) def test_pplx_moe_slow( mnk: tuple[int, int, int], e: int, topk: int, dtype: torch.dtype, world_dp_size: tuple[int, int], per_act_token_quant: bool, block_shape: list[int] | None, use_internode: bool, ): current_platform.seed_everything(7) m, n, k = mnk world_size, dp_size = world_dp_size if dtype == torch.float8_e4m3fn: use_fp8_w8a8 = True quant_dtype = dtype else: use_fp8_w8a8 = False quant_dtype = None if not use_fp8_w8a8 and (per_act_token_quant or block_shape is not None): pytest.skip("Skip quantization test for non-quantized type") if per_act_token_quant and block_shape is not None: pytest.skip("Skip illegal quantization combination") a = torch.randn((m, k), device="cuda", dtype=torch.bfloat16) / 10 score = torch.randn((m, e), device="cuda", dtype=torch.bfloat16) (_, w1, w1_s, _), (_, w2, w2_s, _) = make_test_weights( e, n, k, quant_dtype=quant_dtype, block_shape=block_shape, per_out_ch_quant=per_act_token_quant, ) parallel_launch( world_size, _pplx_moe, dp_size, a, w1, w2, score, topk, e, w1_s, w2_s, quant_dtype, per_act_token_quant, block_shape, use_internode, ) def _pplx_test_loop( pgi: ProcessGroupInfo, dp_size: int, use_internode: bool, use_shared_experts: bool, make_weights: bool, test_fn: Callable, ): device = torch.device(f"cuda:{pgi.local_rank}") init_workspace_manager(device) def format_result(msg, ex=None): if ex is not None: x = str(ex) newx = x.strip(" \n\t")[:16] if len(newx) < len(x): newx = newx + " ..." 
prefix = "E\t" print(f"{textwrap.indent(traceback.format_exc(), prefix)}") print(f"FAILED {msg} - {newx}\n") else: print(f"PASSED {msg}") if use_shared_experts: # Note: this config is only needed for the non-naive shared experts. new_vllm_config = copy.deepcopy(vllm_config) new_vllm_config.parallel_config.data_parallel_size = pgi.world_size new_vllm_config.parallel_config.enable_expert_parallel = True _set_vllm_config(new_vllm_config, pgi.world_size, pgi.rank, pgi.local_rank) current_platform.seed_everything(7) combos = itertools.product( PPLX_COMBOS, NUM_EXPERTS, TOP_KS, DTYPES, [False, True], [None, [128, 128]] ) exceptions = [] count = 0 for mnk, e, topk, dtype, per_act_token_quant, block_shape in combos: count = count + 1 m, n, k = mnk if dtype == torch.float8_e4m3fn: use_fp8_w8a8 = True quant_dtype = dtype else: use_fp8_w8a8 = False quant_dtype = None test_desc = ( f"test_pplx_moe[mnk={mnk}, e={e}, topk={topk}, " f"dtype={dtype}, per_act_token={per_act_token_quant}, " f"block_shape={block_shape}, use_internode={use_internode}, " f"use_shared_experts={use_shared_experts}" ) if not use_fp8_w8a8 and (per_act_token_quant or block_shape is not None): print(f"{test_desc} - Skip quantization test for non-quantized type.") continue if per_act_token_quant and block_shape is not None: print(f"{test_desc} - Skip illegal quantization combination.") continue a = torch.randn((m, k), device="cuda", dtype=torch.bfloat16) / 10 score = torch.randn((m, e), device="cuda", dtype=torch.bfloat16) args = dict() if make_weights: (_, w1, w1_s, _), (_, w2, w2_s, _) = make_test_weights( e, n, k, quant_dtype=quant_dtype, block_shape=block_shape, per_out_ch_quant=per_act_token_quant, ) args["w1"] = w1 args["w2"] = w2 args["w1_s"] = w1_s args["w2_s"] = w2_s if use_shared_experts: args["shared_experts"] = make_shared_experts( n, k, in_dtype=a.dtype, quant_dtype=quant_dtype, ) try: test_fn( pgi=pgi, dp_size=dp_size, a=a, score=score, topk=topk, num_experts=e, quant_dtype=quant_dtype, 
per_act_token_quant=per_act_token_quant, block_shape=block_shape, use_internode=use_internode, **args, ) format_result(test_desc) except Exception as ex: format_result(test_desc, ex) exceptions.append(ex) if len(exceptions) > 0: raise RuntimeError( f"{len(exceptions)} of {count} tests failed in child process, " f"rank={pgi.rank}." ) else: print(f"{count} of {count} tests passed in child process, rank={pgi.rank}.") @pytest.mark.parametrize("world_dp_size", [[2, 1]]) @pytest.mark.parametrize("use_internode", [False]) @requires_pplx @multi_gpu_test(num_gpus=2) def test_pplx_prepare_finalize( world_dp_size: tuple[int, int], use_internode: bool, ): current_platform.seed_everything(7) world_size, dp_size = world_dp_size parallel_launch( world_size * dp_size, _pplx_test_loop, dp_size, use_internode, False, False, _pplx_prepare_finalize, ) @pytest.mark.parametrize("world_dp_size", [[2, 1]]) @pytest.mark.parametrize("use_internode", [False]) @pytest.mark.parametrize("use_shared_experts", [False, True]) @requires_pplx @multi_gpu_test(num_gpus=2) def test_pplx_moe( world_dp_size: tuple[int, int], use_internode: bool, use_shared_experts: bool, ): current_platform.seed_everything(7) world_size, dp_size = world_dp_size parallel_launch( world_size, _pplx_test_loop, dp_size, use_internode, use_shared_experts, True, _pplx_moe, )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/moe/test_rocm_aiter_topk.py
tests/kernels/moe/test_rocm_aiter_topk.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project # This is a test for the AITER ops. # It tests if the AITER ops are # 1. correctly registered as custom ops # 2. correctly defined the relationship between # implementation and fake function # 3. can be used with torch.compile # This file will be skipped if AITER is not installed # and the platform is not ROCm. import importlib.util import pytest import torch # this import statement is needed to ensure the ops are registered import vllm.model_executor.layers.fused_moe.rocm_aiter_fused_moe # noqa: F401 from vllm.platforms import current_platform # need to import once to ensure the ops are registered # Check if aiter package is installed aiter_available = importlib.util.find_spec("aiter") is not None pytestmark = pytest.mark.skipif( not (current_platform.is_rocm() and aiter_available), reason="AITER ops are only available on ROCm with aiter package installed", ) def test_rocm_aiter_biased_grouped_topk_custom_op_registration(): """Test that the custom op is correctly registered.""" # Check if the op exists in torch.ops.vllm assert hasattr(torch.ops.vllm, "rocm_aiter_biased_grouped_topk") # Check if the op is callable assert callable(torch.ops.vllm.rocm_aiter_biased_grouped_topk) def test_rocm_aiter_grouped_topk_custom_op_registration(): """Test that the custom op is correctly registered.""" # Check if the op exists in torch.ops.vllm assert hasattr(torch.ops.vllm, "rocm_aiter_grouped_topk") # Check if the op is callable assert callable(torch.ops.vllm.rocm_aiter_grouped_topk) def test_rocm_aiter_biased_grouped_topk_torch_compile_compatibility(): """Test that the op can be used with torch.compile.""" # Create test tensors token = 64 expert = 256 num_expert_group = 8 topk = 8 topk_group = 4 renormalize = True scale_factor = 1.0 gating_output = torch.randn((token, expert), dtype=torch.bfloat16, device="cuda") e_score_correction_bias = torch.randn( (expert,), 
dtype=torch.bfloat16, device="cuda" ) device = gating_output.device topk_ids = torch.empty((token, topk), dtype=torch.int32, device=device) topk_weights = torch.empty((token, topk), dtype=torch.float32, device=device) # Define a function that uses the op def biased_grouped_topk_fn( gating_output, e_score_correction_bias, topk_weights, topk_ids ): return torch.ops.vllm.rocm_aiter_biased_grouped_topk( gating_output, e_score_correction_bias, topk_weights, topk_ids, num_expert_group, topk_group, renormalize, scale_factor, ) # Verify the op's fake implementation torch.library.opcheck( torch.ops.vllm.rocm_aiter_biased_grouped_topk, (gating_output, e_score_correction_bias, topk_weights, topk_ids), kwargs={ "num_expert_group": num_expert_group, "topk_group": topk_group, "need_renorm": renormalize, "routed_scaling_factor": scale_factor, }, test_utils=("test_faketensor"), ) # Compile the function with appropriate settings compiled_fn = torch.compile( biased_grouped_topk_fn, fullgraph=True, backend="inductor", mode="reduce-overhead", dynamic=False, ) topk_weights_original = torch.empty( (token, topk), dtype=torch.float32, device=device ) topk_ids_original = torch.empty((token, topk), dtype=torch.int32, device=device) topk_weights_compiled = torch.empty( (token, topk), dtype=torch.float32, device=device ) topk_ids_compiled = torch.empty((token, topk), dtype=torch.int32, device=device) # Run both compiled (V1 graph mode) and uncompiled versions (V1 eager mode) biased_grouped_topk_fn( gating_output, e_score_correction_bias, topk_weights_original, topk_ids_original ) compiled_fn( gating_output, e_score_correction_bias, topk_weights_compiled, topk_ids_compiled ) # Sort the results for comparison since the order might not be deterministic topk_ids_original, indices_original = torch.sort(topk_ids_original) topk_weights_original = torch.gather(topk_weights_original, 1, indices_original) topk_ids_compiled, indices_compiled = torch.sort(topk_ids_compiled) topk_weights_compiled = 
torch.gather(topk_weights_compiled, 1, indices_compiled) # Verify results match assert torch.allclose( topk_weights_original, topk_weights_compiled, rtol=1e-2, atol=1e-2 ) assert torch.allclose(topk_ids_original, topk_ids_compiled) def test_rocm_aiter_grouped_topk_torch_compile_compatibility(): """Test that the op can be used with torch.compile.""" # Create test tensors token = 64 expert = 256 num_expert_group = 8 topk = 8 topk_group = 4 renormalize = True scoring_func = "softmax" scale_factor = 1.0 gating_output = torch.randn((token, expert), dtype=torch.bfloat16, device="cuda") device = gating_output.device topk_ids = torch.empty((token, topk), dtype=torch.int32, device=device) topk_weights = torch.empty((token, topk), dtype=torch.float32, device=device) # Define a function that uses the op def grouped_topk_fn(gating_output, topk_weights, topk_ids, scoring_func): return torch.ops.vllm.rocm_aiter_grouped_topk( gating_output, topk_weights, topk_ids, num_expert_group, topk_group, renormalize, scoring_func, scale_factor, ) # Verify the op's fake implementation torch.library.opcheck( torch.ops.vllm.rocm_aiter_grouped_topk, (gating_output, topk_weights, topk_ids), kwargs={ "num_expert_group": num_expert_group, "topk_group": topk_group, "need_renorm": renormalize, "scoring_func": scoring_func, "routed_scaling_factor": scale_factor, }, test_utils=("test_faketensor"), ) # Compile the function with appropriate settings compiled_fn = torch.compile( grouped_topk_fn, fullgraph=True, backend="inductor", mode="reduce-overhead", dynamic=False, ) topk_weights_original = torch.empty( (token, topk), dtype=torch.float32, device=device ) topk_ids_original = torch.empty((token, topk), dtype=torch.int32, device=device) topk_weights_compiled = torch.empty( (token, topk), dtype=torch.float32, device=device ) topk_ids_compiled = torch.empty((token, topk), dtype=torch.int32, device=device) # Run both compiled (V1 graph mode) and uncompiled versions (V1 eager mode) grouped_topk_fn( 
gating_output, topk_weights_original, topk_ids_original, scoring_func ) compiled_fn(gating_output, topk_weights_compiled, topk_ids_compiled, scoring_func) # Sort the results for comparison since the order might not be deterministic topk_ids_original, indices_original = torch.sort(topk_ids_original) topk_weights_original = torch.gather(topk_weights_original, 1, indices_original) topk_ids_compiled, indices_compiled = torch.sort(topk_ids_compiled) topk_weights_compiled = torch.gather(topk_weights_compiled, 1, indices_compiled) # Verify results match assert torch.allclose( topk_weights_original, topk_weights_compiled, rtol=1e-2, atol=1e-2 ) assert torch.allclose(topk_ids_original, topk_ids_compiled)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/moe/test_gpt_oss_triton_kernels.py
tests/kernels/moe/test_gpt_oss_triton_kernels.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from dataclasses import dataclass, fields import pytest import torch import torch.nn.functional as F from vllm.utils.import_utils import has_triton_kernels if not has_triton_kernels(): pytest.skip( "triton_kernels not found, skipping all related tests", allow_module_level=True, ) import triton_kernels.swiglu from triton_kernels.matmul_ogs import FlexCtx, PrecisionConfig from triton_kernels.numerics import InFlexData from triton_kernels.numerics_details.mxfp import downcast_to_mxfp, upcast_from_mxfp from triton_kernels.tensor import FP4, convert_layout, wrap_torch_tensor from triton_kernels.tensor_details import layout from triton_kernels.testing import assert_close from vllm.model_executor.layers.fused_moe.config import FusedMoEQuantConfig from vllm.model_executor.layers.fused_moe.gpt_oss_triton_kernels_moe import ( triton_kernel_moe_forward, ) from vllm.model_executor.layers.utils import shuffle_weight from vllm.utils.math_utils import round_up def deshuffle(w: torch.Tensor): first = w[..., ::2] second = w[..., 1::2] deshuffled = torch.concat((first, second), dim=-1) return deshuffled def init_compute_data(M, K, N, E, a_dtype: str, w_dtype: str, num_warps: int): randbits = [torch.randperm(E) for _ in range(M)] x_list = [ (-1) ** i * ((16384 + ((i * 512) % 4096) + bits).to(torch.int16).view(torch.bfloat16)) for i, bits in enumerate(randbits) ] exp_data = torch.stack(x_list).to(device="cuda") # simulating gate_output (M, E) # create input tensor x = torch.randn((M, K), dtype=torch.bfloat16, device="cuda") w1 = torch.randn((E, 2 * N, K), dtype=torch.bfloat16, device="cuda") w1_bias = torch.randn((E, 2 * N), dtype=torch.bfloat16, device="cuda") w2 = torch.randn((E, K, N), dtype=torch.bfloat16, device="cuda") w2_bias = torch.randn((E, K), dtype=torch.bfloat16, device="cuda") exp_data_tri = exp_data.clone() x_tri = x.clone() w1_tri = w1.clone() w2_tri = w2.clone() 
w1_bias_tri = w1_bias.clone() w2_bias_tri = w2_bias.clone() w1_bias_tri = w1_bias_tri.to(torch.float32) w2_bias_tri = w2_bias_tri.to(torch.float32) dtype_dict = { "bf16": torch.bfloat16, "fp8_e4m3": torch.float8_e4m3fn, "fp8_e5m2": torch.float8_e5m2, } x = x.to(dtype_dict[a_dtype]).to(torch.bfloat16) if w_dtype != "mx4": # simulate quantization support on reference impl w1 = w1.to(dtype_dict[w_dtype]).to(torch.bfloat16) w2 = w2.to(dtype_dict[w_dtype]).to(torch.bfloat16) # triton moe kernel use transposed shape for matmul w1_tri = w1_tri.transpose(-2, -1) w2_tri = w2_tri.transpose(-2, -1) # shuffle weights w1_tri = shuffle_weight(w1_tri) w1_bias_tri = shuffle_weight(w1_bias_tri) # quant triton_weights x_tri = x.to(dtype_dict[a_dtype]) if w_dtype != "mx4": pytest.skip("NYI") else: # quantize to mx4 # careful on the padding here, the activation padding need to be # multiple of 64, the actual engine is not implemented w1_bottom_pad = round_up(w1_tri.shape[1], 64) - w1_tri.shape[1] w1_right_pad = round_up(w1_tri.shape[2], 128) - w1_tri.shape[2] w2_bottom_pad = w1_right_pad // 2 w2_right_pad = w1_bottom_pad x_pad = w1_bottom_pad w1_tri = F.pad( w1_tri, (0, w1_right_pad, 0, w1_bottom_pad, 0, 0), mode="constant", value=0, ) w2_tri = F.pad( w2_tri, (0, w2_right_pad, 0, w2_bottom_pad, 0, 0), mode="constant", value=0, ) w1_bias_tri = F.pad( w1_bias_tri, (0, w1_right_pad, 0, 0), mode="constant", value=0 ) w2_bias_tri = F.pad( w2_bias_tri, (0, w2_right_pad, 0, 0), mode="constant", value=0 ) x_tri = F.pad(x_tri, (0, x_pad, 0, 0), mode="constant", value=0) w_layout, w_layout_opts = layout.make_default_matmul_mxfp4_w_layout(mx_axis=1) w_scale_layout, w_scale_layout_opts = ( layout.make_default_matmul_mxfp4_w_scale_layout( mx_axis=1, num_warps=num_warps ) ) w1_tri, w1_scale_tri = downcast_to_mxfp(w1_tri, torch.uint8, axis=1) w1 = upcast_from_mxfp(w1_tri, w1_scale_tri, torch.bfloat16, axis=1) w2_tri, w2_scale_tri = downcast_to_mxfp(w2_tri, torch.uint8, axis=1) w2 = 
upcast_from_mxfp(w2_tri, w2_scale_tri, torch.bfloat16, axis=1) w1_tri = convert_layout( wrap_torch_tensor(w1_tri, FP4), w_layout, **w_layout_opts ) w1_scale_tri = convert_layout( wrap_torch_tensor(w1_scale_tri), w_scale_layout, **w_scale_layout_opts, ) w2_tri = convert_layout( wrap_torch_tensor(w2_tri, FP4), w_layout, **w_layout_opts ) w2_scale_tri = convert_layout( wrap_torch_tensor(w2_scale_tri), w_scale_layout, **w_scale_layout_opts, ) pc1 = PrecisionConfig( weight_scale=w1_scale_tri, flex_ctx=FlexCtx(rhs_data=InFlexData()) ) pc2 = PrecisionConfig( weight_scale=w2_scale_tri, flex_ctx=FlexCtx(rhs_data=InFlexData()) ) # tucuate so the rest can run properly w1 = w1[..., :K, : 2 * N] w2 = w2[..., :N, :K] w1 = deshuffle(w1) w1 = w1.transpose(-1, -2).contiguous() w2 = w2.transpose(-1, -2).contiguous() return ( x, w1, w1_bias, w2, w2_bias, exp_data, x_tri, w1_tri, w2_tri, exp_data_tri, w1_bias_tri, w2_bias_tri, pc1, pc2, ) @dataclass class ModelConfig: num_hidden_layers: int = 36 num_experts: int = 128 experts_per_token: int = 4 vocab_size: int = 201088 hidden_size: int = 2880 intermediate_size: int = 2880 head_dim: int = 64 num_attention_heads: int = 64 num_key_value_heads: int = 8 sliding_window: int = 128 initial_context_length: int = 4096 rope_theta: float = 150000.0 rope_parameters_factor: float = 32.0 rope_ntk_alpha: float = 1.0 rope_ntk_beta: float = 32.0 def swiglu(x, alpha: float = 1.702, limit: float = 1.0): # Note we add an extra bias of 1 to the linear layer x_glu, x_linear = torch.chunk(x, 2, dim=-1) if limit is not None: x_glu = x_glu.clamp(max=limit) out_glu = x_glu * torch.sigmoid(alpha * x_glu) if limit is not None: x_linear = x_linear.clamp(min=-limit, max=limit) return out_glu * (x_linear + 1) def oai_moe_forward( hidden_states: torch.Tensor, # (M, K) w1: torch.Tensor, # (E, 2N) w1_bias: torch.Tensor, # (E, 2N, K) w2: torch.Tensor, # (E, K, N) w2_bias: torch.Tensor, # (E, N) gating_output: torch.Tensor, # (M, E) topk: int, ): # model.py 309:330, 
assuming gating and norm t = hidden_states experts = torch.topk(gating_output, k=topk, dim=-1, sorted=True) expert_weights = torch.nn.functional.softmax(experts.values, dim=1) expert_indices = experts.indices # MLP #1 mlp1_weight = w1[expert_indices, ...] mlp1_bias = w1_bias[expert_indices, ...] t = torch.einsum("beck,bk->bec", mlp1_weight, t) + mlp1_bias t = swiglu(t, limit=7) # MLP #2 mlp2_weight = w2[expert_indices, ...] mlp2_bias = w2_bias[expert_indices, ...] t = torch.einsum("beck,bek->bec", mlp2_weight, t) t += mlp2_bias # Weighted sum of experts t = torch.einsum("bec,be->bc", t, expert_weights) return t @dataclass class Case: a_dtype: str w_dtype: str @pytest.mark.parametrize( ", ".join(f.name for f in fields(Case)), [ tuple(getattr(case, f.name) for f in fields(Case)) for case in [ # Case(a_dtype="bf16", w_dtype="bf16"), # Case(a_dtype="fp8_e4m3", w_dtype="fp8_e5m2"), Case(a_dtype="bf16", w_dtype="mx4") ] ], ) @pytest.mark.parametrize("num_token", [2]) @pytest.mark.parametrize("tp", [1, 2, 4, 8]) def test_equiv(num_token, a_dtype, w_dtype, tp, workspace_init): from triton_kernels.tensor_details import layout if not hasattr(layout, "make_default_matmul_mxfp4_w_layout"): pytest.skip("make_default_matmul_mxfp4_w_layout not available") M = num_token E = ModelConfig.num_experts K = ModelConfig.hidden_size N = ModelConfig.intermediate_size // tp topk = ModelConfig.experts_per_token ( x, w1, w1_bias, w2, w2_bias, exp_data, x_tri, w1_tri, w2_tri, exp_data_tri, w1_bias_tri, w2_bias_tri, pc1, pc2, ) = init_compute_data(M, K, N, E, a_dtype, w_dtype, num_warps=8) quant_config = FusedMoEQuantConfig.make( w1_bias=w1_bias_tri, w2_bias=w2_bias_tri, w1_scale=pc1, w2_scale=pc2, ) out_triton_monolithic = triton_kernel_moe_forward( hidden_states=x_tri, w1=w1_tri, w2=w2_tri, gating_output=exp_data_tri, topk=topk, renormalize=True, quant_config=quant_config, ) out_triton_monolithic = out_triton_monolithic[..., :K] out_ref = oai_moe_forward( hidden_states=x, w1=w1, 
w1_bias=w1_bias, w2=w2, w2_bias=w2_bias, gating_output=exp_data, topk=topk, ) assert_close(ref=out_ref, tri=out_triton_monolithic, maxtol=0.025, rmstol=0.005) def test_unit_shuffle(): N = ModelConfig.intermediate_size K = ModelConfig.hidden_size m = torch.randn((K, 2 * N), dtype=torch.bfloat16, device="cuda") x = torch.randn(K, dtype=torch.bfloat16, device="cuda") m_shuffled = shuffle_weight(m) out_ref = x @ m out_ref = swiglu(out_ref, limit=1.0) out = x @ m_shuffled out = triton_kernels.swiglu.swiglu_torch( out, alpha=1.702, precision_config=triton_kernels.swiglu.PrecisionConfig(limit=1.0), ) assert_close(ref=out_ref, tri=out)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/moe/test_flashinfer_moe.py
tests/kernels/moe/test_flashinfer_moe.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import pytest import torch from tests.kernels.moe.utils import make_test_quant_config from tests.kernels.quantization.nvfp4_utils import ( FLOAT4_E2M1_MAX, FLOAT8_E4M3_MAX, dequantize_nvfp4_to_dtype, ) from tests.kernels.utils import torch_moe from vllm import _custom_ops as ops from vllm.config import ParallelConfig, VllmConfig, set_current_vllm_config from vllm.model_executor.layers.fused_moe.flashinfer_cutlass_moe import ( FlashInferExperts, is_valid_flashinfer_cutlass_fused_moe, ) from vllm.model_executor.layers.fused_moe.flashinfer_cutlass_prepare_finalize import ( create_flashinfer_prepare_finalize, ) from vllm.model_executor.layers.fused_moe.fused_moe import fused_topk from vllm.model_executor.layers.fused_moe.modular_kernel import FusedMoEModularKernel from vllm.platforms import current_platform from vllm.utils.flashinfer import has_flashinfer_cutlass_fused_moe if not has_flashinfer_cutlass_fused_moe() or not current_platform.has_device_capability( 100 ): pytest.skip( "Requires flashinfer_cutlass_fused_moe and nvfp4 support", allow_module_level=True, ) MNK_FACTORS = [ (2, 1024, 1024), (2, 3072, 1024), (2, 3072, 1536), (64, 1024, 1536), (64, 3072, 1024), (64, 2048, 1536), (224, 1024, 1024), (224, 1024, 1536), ] @pytest.mark.parametrize("m,n,k", MNK_FACTORS) @pytest.mark.parametrize("e", [40, 64, 256]) @pytest.mark.parametrize("topk", [1, 6, 8]) @pytest.mark.parametrize("dtype", [torch.bfloat16]) @pytest.mark.parametrize("activation", ["silu_and_mul", "relu2"]) @torch.inference_mode() def test_flashinfer_fp4_moe_no_graph( m: int, n: int, k: int, e: int, topk: int, dtype: torch.dtype, activation: str, workspace_init, ): current_platform.seed_everything(7) with set_current_vllm_config( VllmConfig(parallel_config=ParallelConfig(pipeline_parallel_size=1)) ): a = torch.randn((m, k), device="cuda", dtype=dtype) / 10 quant_blocksize = 16 is_gated_act = 
activation == "silu_and_mul" w1_q, w2_q, quant_config = make_test_quant_config( e, n, k, in_dtype=dtype, quant_dtype="nvfp4", block_shape=None, per_act_token_quant=False, make_gate=is_gated_act, ) score = torch.randn((m, e), device="cuda", dtype=dtype) topk_weights, topk_ids, _ = fused_topk(a, score, topk, renormalize=False) assert is_valid_flashinfer_cutlass_fused_moe(a, w1_q, w2_q) flashinfer_experts = FusedMoEModularKernel( create_flashinfer_prepare_finalize(use_dp=False, use_nvfp4=True), FlashInferExperts(out_dtype=dtype, quant_config=quant_config), ) fi_activation = {"silu_and_mul": "silu", "relu2": "relu2_no_mul"}[activation] flashinfer_output = flashinfer_experts( hidden_states=a, w1=w1_q, w2=w2_q, topk_weights=topk_weights, topk_ids=topk_ids, activation=fi_activation, ) # Reference check: a_global_scale = ( (FLOAT8_E4M3_MAX * FLOAT4_E2M1_MAX) / torch.amax(a.flatten(), dim=-1) ).to(torch.float32) a_fp4, a_scale_interleaved = ops.scaled_fp4_quant(a, a_global_scale) _, m_k = a_fp4.shape a_in_dtype = dequantize_nvfp4_to_dtype( a_fp4, a_scale_interleaved, a_global_scale, dtype=a.dtype, device=a.device, block_size=quant_blocksize, ) w1_d = torch.empty( (e, (2 if is_gated_act else 1) * n, k), device="cuda", dtype=dtype ) w2_d = torch.empty((e, k, n), device="cuda", dtype=dtype) for idx in range(0, e): w1_d[idx] = dequantize_nvfp4_to_dtype( w1_q[idx], quant_config.w1_scale[idx], (1 / quant_config.g1_alphas[idx]), dtype=dtype, device=w1_q.device, block_size=quant_blocksize, ) w2_d[idx] = dequantize_nvfp4_to_dtype( w2_q[idx], quant_config.w2_scale[idx], (1 / quant_config.g2_alphas[idx]), dtype=dtype, device=w2_q.device, block_size=quant_blocksize, ) torch_output = torch_moe( a_in_dtype, w1_d, w2_d, score, topk, activation=activation ) torch.testing.assert_close( torch_output, flashinfer_output, atol=1e-1, rtol=1e-1 ) if __name__ == "__main__": test_flashinfer_fp4_moe_no_graph((2, 1024, 1024), 40, 1, torch.half)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/moe/parallel_utils.py
tests/kernels/moe/parallel_utils.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ DeepEP test utilities """ import dataclasses import os import traceback from collections.abc import Callable from typing import Concatenate import torch from torch.distributed import ProcessGroup from torch.multiprocessing import spawn # pyright: ignore[reportPrivateImportUsage] from typing_extensions import ParamSpec from vllm.utils.import_utils import has_deep_ep from vllm.utils.network_utils import get_open_port if has_deep_ep(): from vllm.model_executor.layers.fused_moe.deepep_ht_prepare_finalize import ( DeepEPHTPrepareAndFinalize, ) from vllm.model_executor.layers.fused_moe.deepep_ll_prepare_finalize import ( DeepEPLLPrepareAndFinalize, ) ## Parallel Processes Utils P = ParamSpec("P") @dataclasses.dataclass class ProcessGroupInfo: world_size: int world_local_size: int rank: int node_rank: int local_rank: int device: torch.device def _worker_parallel_launch( local_rank: int, world_size: int, world_local_size: int, node_rank: int, init_method: str, worker: Callable[Concatenate[ProcessGroupInfo, P], None], *args: P.args, **kwargs: P.kwargs, ) -> None: rank = node_rank * world_local_size + local_rank torch.cuda.set_device(local_rank) device = torch.device("cuda", local_rank) torch.distributed.init_process_group( backend="cpu:gloo,cuda:nccl", init_method=init_method, rank=rank, world_size=world_size, device_id=device, ) barrier = torch.tensor([rank], device=device) torch.distributed.all_reduce(barrier) try: worker( ProcessGroupInfo( world_size=world_size, world_local_size=world_local_size, rank=rank, node_rank=node_rank, local_rank=local_rank, device=device, ), *args, **kwargs, ) except Exception as ex: print(ex) traceback.print_exc() raise finally: torch.distributed.destroy_process_group() def parallel_launch( world_size: int, worker: Callable[Concatenate[ProcessGroupInfo, P], None], *args: P.args, **kwargs: P.kwargs, ) -> None: assert not kwargs spawn( 
_worker_parallel_launch, args=( world_size, world_size, 0, f"tcp://{os.getenv('LOCALHOST', 'localhost')}:{get_open_port()}", worker, ) + args, nprocs=world_size, join=True, ) ## DeepEP specific utils @dataclasses.dataclass class DeepEPHTArgs: num_local_experts: int @dataclasses.dataclass class DeepEPLLArgs: max_tokens_per_rank: int hidden_size: int num_experts: int use_fp8_dispatch: bool def make_deepep_ht_a2a( pg: ProcessGroup, pgi: ProcessGroupInfo, dp_size: int, ht_args: DeepEPHTArgs, q_dtype: torch.dtype | None = None, block_shape: list[int] | None = None, ): import deep_ep # high throughput a2a num_nvl_bytes = 1024 * 1024 * 1024 # 1GB num_rdma_bytes, low_latency_mode, num_qps_per_rank = 0, False, 1 buffer = deep_ep.Buffer( group=pg, num_nvl_bytes=num_nvl_bytes, num_rdma_bytes=num_rdma_bytes, low_latency_mode=low_latency_mode, num_qps_per_rank=num_qps_per_rank, ) return DeepEPHTPrepareAndFinalize( buffer=buffer, num_dispatchers=pgi.world_size, dp_size=dp_size, rank_expert_offset=pgi.rank * ht_args.num_local_experts, ) def make_deepep_ll_a2a( pg: ProcessGroup, pgi: ProcessGroupInfo, deepep_ll_args: DeepEPLLArgs, q_dtype: torch.dtype | None = None, block_shape: list[int] | None = None, ): import deep_ep # low-latency a2a num_rdma_bytes = deep_ep.Buffer.get_low_latency_rdma_size_hint( deepep_ll_args.max_tokens_per_rank, deepep_ll_args.hidden_size, pgi.world_size, deepep_ll_args.num_experts, ) buffer = deep_ep.Buffer( group=pg, num_rdma_bytes=num_rdma_bytes, low_latency_mode=True, num_qps_per_rank=deepep_ll_args.num_experts // pgi.world_size, ) return DeepEPLLPrepareAndFinalize( buffer=buffer, num_dispatchers=pgi.world_size, max_tokens_per_rank=deepep_ll_args.max_tokens_per_rank, use_fp8_dispatch=deepep_ll_args.use_fp8_dispatch, ) def make_deepep_a2a( pg: ProcessGroup, pgi: ProcessGroupInfo, dp_size: int, deepep_ht_args: DeepEPHTArgs | None, deepep_ll_args: DeepEPLLArgs | None, q_dtype: torch.dtype | None = None, block_shape: list[int] | None = None, ): if 
deepep_ht_args is not None: assert deepep_ll_args is None return make_deepep_ht_a2a( pg, pgi, dp_size, deepep_ht_args, q_dtype, block_shape ) assert deepep_ll_args is not None return make_deepep_ll_a2a(pg, pgi, deepep_ll_args, q_dtype, block_shape)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/moe/test_cutedsl_moe.py
tests/kernels/moe/test_cutedsl_moe.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import pytest from vllm.platforms import current_platform if not current_platform.has_device_capability(100): pytest.skip( reason="Nvfp4 Requires compute capability of 10 or above.", allow_module_level=True, ) import torch from flashinfer import fp4_quantize from torch.nn import functional as F from vllm.model_executor.layers.activation import SiluAndMul from vllm.model_executor.layers.fused_moe.flashinfer_cutedsl_moe import ( flashinfer_cutedsl_moe_masked, ) from vllm.utils.flashinfer import ( flashinfer_cutedsl_grouped_gemm_nt_masked as cutedsl_gmm_masked, ) from vllm.utils.flashinfer import ( scaled_fp4_grouped_quantize, ) kE2M1ToFloat = torch.tensor( [0.0, 0.5, 1.0, 1.5, 2.0, 3.0, 4.0, 6.0], dtype=torch.float32 ) FLOAT8_E4M3_MAX = 448.0 FLOAT4_E2M1_MAX = 6.0 def convert_swizzled_to_linear(a_sf_swizzled: torch.Tensor, m, k, block_size): m_tiles = (m + 128 - 1) // 128 f = block_size * 4 k_tiles = (k + f - 1) // f tmp = torch.reshape(a_sf_swizzled, (1, m_tiles, k_tiles, 32, 4, 4)) tmp = torch.permute(tmp, (0, 1, 4, 3, 2, 5)) out = tmp.reshape(m_tiles * 128, k_tiles * f // block_size) return out[0:m, 0:k] def dequantize_nvfp4_to_dtype( tensor_fp4, tensor_sf, global_scale, dtype, device, block_size=16 ): """Dequantize the fp4 tensor back to high precision.""" # Two fp4 values are packed into one uint8. 
assert tensor_fp4.dtype == torch.uint8 m, packed_k = tensor_fp4.shape k = packed_k * 2 tensor_f32 = break_fp4_bytes(tensor_fp4, dtype) tensor_f32 = tensor_f32.reshape(m, k // block_size, block_size) tensor_sf = tensor_sf.view(torch.float8_e4m3fn) tensor_sf = convert_swizzled_to_linear(tensor_sf, m, k, block_size) tensor_sf_dtype = tensor_sf.to(torch.float32) / global_scale # scale the tensor out = (tensor_f32 * tensor_sf_dtype.unsqueeze(-1)).reshape(m, k) return out.to(dtype=dtype) def break_fp4_bytes(a, dtype): assert a.dtype == torch.uint8 m, n = a.shape # Vectorized nibble processing a_flat = a.flatten() high = (a_flat & 0xF0) >> 4 # Upper nibbles low = a_flat & 0x0F # Lower nibbles # Combine nibbles for batch processing combined = torch.stack((low, high), dim=1).flatten() # Vectorized sign and magnitude extraction signs = (combined & 0x08).to(torch.bool) # Sign bits abs_vals = (combined & 0x07).to(torch.long) # Magnitude indices # Device-aware lookup and sign application kE2M1 = kE2M1ToFloat.to(device=a.device) values = kE2M1[abs_vals] * torch.where(signs, -1.0, 1.0) # Reshape to final form return values.reshape(m, n * 2).to(dtype=dtype) def generate_balanced_routing( hidden_states: torch.Tensor, num_experts: int, top_k: int ): """ Generate routing weights and topk indices such that every expert is active. 
Returns routing_weights, topk_idx """ num_tokens, hidden_dim = hidden_states.shape # num_tokens = batch_size * seq_len # First, assign at least one token per expert tokens_per_expert = torch.arange(num_tokens) % num_experts tokens_per_expert = tokens_per_expert[torch.randperm(num_tokens)] # shuffle # Each token has top_k experts — start with one guaranteed expert topk_idx = torch.full((num_tokens, top_k), -1, dtype=torch.long) topk_idx[:, 0] = tokens_per_expert # For remaining top_k - 1 experts, pick randomly (allowing repeats) if top_k > 1: random_choices = torch.randint(0, num_experts, (num_tokens, top_k - 1)) topk_idx[:, 1:] = random_choices # Normalize routing weights so each token's weights sum to 1 routing_weights = torch.rand(num_tokens, top_k) routing_weights /= routing_weights.sum(dim=-1, keepdim=True) # Reshape back if needed routing_weights = routing_weights.view(num_tokens, top_k) topk_idx = topk_idx.view(num_tokens, top_k) return routing_weights, topk_idx def prepare_inputs( hidden_states: torch.Tensor, router_logits: torch.Tensor, num_experts: int, topk: int, ): routing_weights, topk_idx = generate_balanced_routing( router_logits, num_experts, topk ) masked_m = [] for i in range(num_experts): mask = topk_idx.view(-1) == i masked_m.append(mask.sum()) masked_m = torch.tensor(masked_m, dtype=torch.int32) # Intialize the hidden_states_3d with ones instead of empty to avoid nan # issue. 
hidden_states_3d = torch.ones( (num_experts, max(masked_m), hidden_states.shape[1]), dtype=hidden_states.dtype ) for i in range(num_experts): hidden_states_3d[i, : masked_m[i], :] = hidden_states[topk_idx.view(-1) == i] return hidden_states_3d, masked_m, topk_idx, routing_weights MNK_FACTORS = [ (2, 1024, 1024), (2, 1024, 1536), (2, 3072, 1024), (2, 3072, 1536), (64, 1024, 1024), (64, 1024, 1536), (64, 3072, 1024), (64, 2048, 1024), (224, 1024, 1024), (224, 1024, 1536), ] # Reference implementation of torch_moe def torch_moe(a, w1, w2, score, topk, expert_map): B, D = a.shape a = a.view(B, -1, D).repeat(1, topk, 1).reshape(-1, D) out = torch.zeros(B * topk, w2.shape[1], dtype=a.dtype, device=a.device) score = torch.softmax(score, dim=-1, dtype=torch.float32) topk_weight, topk_ids = torch.topk(score, topk) topk_weight = topk_weight.view(-1) topk_ids = topk_ids.view(-1) if expert_map is not None: topk_ids = expert_map[topk_ids] for i in range(w1.shape[0]): mask = topk_ids == i if mask.sum(): out[mask] = SiluAndMul()(a[mask] @ w1[i].transpose(0, 1)) @ w2[i].transpose( 0, 1 ) return ( out.view(B, -1, w2.shape[1]) * topk_weight.view(B, -1, 1).to(out.dtype) ).sum(dim=1) def torch_moe_nvfp4(a, w1, w2, topk, topk_weight, topk_ids): B, D = a.shape a = a.view(B, -1, D).repeat(1, topk, 1).reshape(-1, D) out = torch.zeros(B * topk, w2.shape[1], dtype=a.dtype, device=a.device) topk_weight = topk_weight.view(-1) topk_ids = topk_ids.view(-1) for i in range(w1.shape[0]): mask = topk_ids == i if mask.sum(): m = w1[i].shape[0] assert m % 2 == 0 # Note: w1 and w3 are swapped! 
w3_expert, w1_expert = w1[i][m // 2 :, :], w1[i][: m // 2, :] inter = F.silu(a[mask] @ w1_expert.t()) * (a[mask] @ w3_expert.t()) inter_gs = torch.tensor(1.0).cuda() inter_q, inter_blockscale = fp4_quantize(inter, inter_gs) inter = dequantize_nvfp4_to_dtype( inter_q, inter_blockscale, inter_gs, dtype=inter.dtype, device=inter.device, block_size=16, ).cuda() out[mask] = inter @ w2[i].transpose(0, 1) return ( out.view(B, -1, w2.shape[1]) * topk_weight.view(B, -1, 1).to(out.dtype) ).sum(dim=1) def grouped_gemm_ref( hidden_states_expanded: torch.Tensor, hidden_states_3d: torch.Tensor, weights: torch.Tensor, topk_idx: torch.Tensor, masked_m: torch.Tensor, B: int, topk: int, num_experts: int, *, block_size: int = 16, ) -> torch.Tensor: """ Computes the reference grouped GEMM (fp4 quantized per-expert loop), computes flashinfer grouped GEMM (for scale consistency), and returns ONLY the repacked reference output: out_ref. Returns: out_ref: Tensor [num_experts, max_m, n_out] """ device_hs = hidden_states_expanded.device device_w = weights.device out_dtype = weights.dtype n_out = weights.shape[1] # Flattened reference output (B*topk, n_out) out = torch.zeros((B * topk, n_out), dtype=out_dtype, device=device_w) # Per-expert reference compute loop for i in range(num_experts): mask = topk_idx.view(-1) == i if mask.any(): lhs = hidden_states_expanded[mask] rhs = weights[i] a_amax = lhs.abs().max().to(torch.float32).to(device_hs) b_amax = rhs.abs().max().to(torch.float32).to(device_w) a_gs = FLOAT8_E4M3_MAX * FLOAT4_E2M1_MAX / a_amax b_gs = FLOAT8_E4M3_MAX * FLOAT4_E2M1_MAX / b_amax lhsq, lhsq_sf = fp4_quantize(lhs, a_gs) rhsq, rhsq_sf = fp4_quantize(rhs, b_gs) lhs_in_dtype = dequantize_nvfp4_to_dtype( lhsq, lhsq_sf, a_gs, dtype=lhs.dtype, device=device_hs, block_size=block_size, ) rhs_in_dtype = dequantize_nvfp4_to_dtype( rhsq, rhsq_sf, b_gs, dtype=rhs.dtype, device=device_w, block_size=block_size, ) out[mask] = lhs_in_dtype @ rhs_in_dtype.t() # Determine per-expert max_m 
max_m_val = int(masked_m.max().item()) # Repack into [num_experts, max_m, n_out] out_ref = torch.zeros( (num_experts, max_m_val, n_out), dtype=out.dtype, device=out.device, ) expert_slot = [0] * num_experts for i, expert_id in enumerate(topk_idx.view(-1).tolist()): slot = expert_slot[expert_id] if slot < max_m_val: out_ref[expert_id, slot, :] = out[i] expert_slot[expert_id] += 1 else: raise IndexError( f"Expert {expert_id} exceeded max slots ({max_m_val}). " "Increase max_m or check masked_m." ) return out_ref def flashinfer_cutedsl_grouped_gemm_nt_masked( hidden_states: torch.Tensor, # 3d input_global_scale: torch.Tensor, # (l,) weights: torch.Tensor, w_global_scale: torch.Tensor, # (l,) masked_m: torch.Tensor, ): # hidden_states: [l, m, k] # weights: [l, n, k] aq, aq_sf = scaled_fp4_grouped_quantize( hidden_states, masked_m.to(hidden_states.device), input_global_scale, ) num_experts, n, k = weights.shape bq, bq_sf = scaled_fp4_grouped_quantize( weights, torch.full((num_experts,), n, device=weights.device, dtype=torch.int32), w_global_scale, ) out = torch.zeros( (num_experts, max(masked_m), n), dtype=weights.dtype, device=aq.device ) out = out.permute(1, 2, 0) # requirement of kernel sf_vec_size = 16 ab_dtype = "float4_e2m1fn" sf_dtype = "float8_e4m3fn" c_dtype = "bfloat16" alpha = 1.0 / (input_global_scale * w_global_scale).to(out.dtype).view( 1, 1, num_experts ) def get_cute_dtype(input: torch.Tensor) -> str: if input.dtype == torch.bfloat16: return "bfloat16" elif input.dtype == torch.float16: return "float16" elif input.dtype == torch.float32: return "float32" else: raise ValueError(f"Unsupported cute dtype {input.dtype}") cutedsl_gmm_masked( (aq, aq_sf), (bq, bq_sf), out, masked_m.to(aq.device), ab_dtype=ab_dtype, sf_dtype=sf_dtype, c_dtype=c_dtype, sf_vec_size=sf_vec_size, alpha=alpha, alpha_dtype=get_cute_dtype(alpha), ) return out @pytest.mark.parametrize("bs, hidden_dim, inter_dim", [(2, 128, 256), (16, 128, 512)]) @pytest.mark.parametrize("topk", [1, 2, 
4]) @torch.inference_mode() def test_flashinfer_cutedsl_moe_masked( bs: int, hidden_dim: int, inter_dim: int, topk: int ): torch.manual_seed(42) device = "cuda" num_experts = 8 hidden_states = ( torch.randn(bs, hidden_dim, dtype=torch.bfloat16, device=device) / 5.0 ) w1 = ( torch.randn( num_experts, 2 * inter_dim, hidden_dim, dtype=torch.bfloat16, device=device ) / 10.0 ) w2 = ( torch.randn( num_experts, hidden_dim, inter_dim, dtype=torch.bfloat16, device=device ) / 10.0 ) router_logits = torch.randn(bs, num_experts, dtype=torch.float32) hidden_states_expanded = ( hidden_states.view(bs, -1, hidden_dim) .repeat(1, topk, 1) .reshape(-1, hidden_dim) ) hidden_states_3d, masked_m, topk_idx, routing_weights = prepare_inputs( hidden_states_expanded, router_logits, num_experts, topk ) w1_amax = w1.abs().amax(dim=(1, 2)).to(torch.float32).to(w1.device) w2_amax = w2.abs().amax(dim=(1, 2)).to(torch.float32).to(w2.device) input_global_scale = torch.ones( (num_experts,), dtype=torch.float32, device=hidden_states.device ) w1_global_scale = FLOAT8_E4M3_MAX * FLOAT4_E2M1_MAX / w1_amax w2_global_scale = FLOAT8_E4M3_MAX * FLOAT4_E2M1_MAX / w2_amax a2_global_scale = torch.ones( (num_experts,), dtype=torch.float32, device=hidden_states.device ) # assume intermediate scale is 1.0 w1_fp4, w1_blockscale = scaled_fp4_grouped_quantize( w1, torch.ones(num_experts, dtype=torch.int32, device=w1.device) * 2 * inter_dim, w1_global_scale, ) w2_fp4, w2_blockscale = scaled_fp4_grouped_quantize( w2, torch.ones(num_experts, dtype=torch.int32, device=w2.device) * hidden_dim, w2_global_scale, ) w1_alpha = 1.0 / (input_global_scale * w1_global_scale) w2_alpha = 1.0 / (a2_global_scale * w2_global_scale) out = torch.empty_like(hidden_states_3d) # Note: the 1st dim shouldn't be bs wk = torch.empty( num_experts, hidden_states_3d.shape[1], inter_dim * 2, dtype=hidden_states_3d.dtype, device=hidden_states.device, ) flashinfer_cutedsl_moe_masked( hidden_states_3d.to(hidden_states.device), input_global_scale, 
w1_fp4.permute(2, 0, 1), w1_blockscale, w1_alpha, w2_fp4.permute(2, 0, 1), a2_global_scale, w2_blockscale, w2_alpha, masked_m.to(hidden_states.device), wk, out, ) # reference a_fp4, a_scale_interleaved = fp4_quantize(hidden_states, input_global_scale) a_in_dtype = dequantize_nvfp4_to_dtype( a_fp4, a_scale_interleaved, input_global_scale, dtype=hidden_states.dtype, device=hidden_states.device, block_size=16, ) w1_d = torch.empty( (num_experts, 2 * inter_dim, hidden_dim), device=w1.device, dtype=w1.dtype ) w2_d = torch.empty( (num_experts, hidden_dim, inter_dim), device=w2.device, dtype=w2.dtype ) for idx in range(0, num_experts): w1_fp4_sliced, w1_blockscale_sliced = fp4_quantize( w1[idx], w1_global_scale[idx] ) w2_fp4_sliced, w2_blockscale_sliced = fp4_quantize( w2[idx], w2_global_scale[idx] ) w1_d[idx] = dequantize_nvfp4_to_dtype( w1_fp4_sliced, w1_blockscale_sliced, w1_global_scale[idx], dtype=w1.dtype, device=w1.device, block_size=16, ) w2_d[idx] = dequantize_nvfp4_to_dtype( w2_fp4_sliced, w2_blockscale_sliced, w2_global_scale[idx], dtype=w2.dtype, device=w2.device, block_size=16, ) ref_output = torch_moe_nvfp4( a_in_dtype, w1_d, w2_d, topk, routing_weights.to(a_in_dtype.device), topk_idx.to(a_in_dtype.device), ) out_weighted = torch.zeros_like(ref_output, device=out.device, dtype=out.dtype) positions = torch.nonzero(masked_m[topk_idx], as_tuple=False) rows, cols = positions[:, 0], positions[:, 1] experts = topk_idx[rows, cols] for i in range(num_experts): mask = experts == i if mask.any(): idx = torch.nonzero(mask, as_tuple=False).squeeze(-1) r, c = rows[idx], cols[idx] out_weighted[r] += out[i, : len(r), :] * routing_weights[r, c].to( out.device ).unsqueeze(-1) torch.testing.assert_close( out_weighted.cpu(), ref_output.cpu(), atol=2e-1, rtol=2e-1 ) @pytest.mark.parametrize( "bs, hidden_dim, inter_dim, topk", [(2, 128, 256, 2), (16, 128, 512, 5)] ) @torch.inference_mode() def test_grouped_gemm_nt_masked( bs: int, hidden_dim: int, inter_dim: int, topk: int ) -> 
None: torch.manual_seed(42) B = bs D = hidden_dim N = inter_dim # CuteDSL group gemm has issue when not all experts are active. # i.e. masked = [2, 3, 0, 0, 1] where the 2nd and 3rd experts are inactive # see https://github.com/flashinfer-ai/flashinfer/issues/1856 num_experts = bs hidden_states = torch.randn(B, D, dtype=torch.bfloat16, device="cuda") weights = torch.randn(num_experts, N, D, dtype=torch.bfloat16, device="cuda") router_logits = torch.randn(B, num_experts, dtype=torch.float32) hidden_states_expanded = ( hidden_states.view(B, -1, D).repeat(1, topk, 1).reshape(-1, D) ) hidden_states_3d, masked_m, topk_idx, _ = prepare_inputs( hidden_states_expanded, router_logits, num_experts, topk ) a_amax = ( hidden_states_3d.abs() .amax(dim=(1, 2)) .to(torch.float32) .to(hidden_states.device) ) b_amax = weights.abs().amax(dim=(1, 2)).to(torch.float32).to(weights.device) a_gs = FLOAT8_E4M3_MAX * FLOAT4_E2M1_MAX / a_amax b_gs = FLOAT8_E4M3_MAX * FLOAT4_E2M1_MAX / b_amax out_flashinfer = flashinfer_cutedsl_grouped_gemm_nt_masked( hidden_states_3d.to(hidden_states.device), a_gs, weights, b_gs, masked_m ) # reference out_ref = grouped_gemm_ref( hidden_states_expanded=hidden_states_expanded, hidden_states_3d=hidden_states_3d, weights=weights, topk_idx=topk_idx, masked_m=masked_m, B=B, topk=topk, num_experts=num_experts, ) # Note: just to compare the masked position due to cutedsl may write nan # into unmasked position. for i in range(num_experts): torch.testing.assert_close( out_flashinfer.permute(2, 0, 1)[i, : masked_m[i]], out_ref.to(out_flashinfer.device)[i, : masked_m[i]], atol=1e-1, rtol=1e-1, ) if __name__ == "__main__": test_flashinfer_cutedsl_moe_masked(16, 128, 512, 4) test_grouped_gemm_nt_masked(16, 128, 512, 4)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/moe/test_silu_mul_fp8_quant_deep_gemm.py
tests/kernels/moe/test_silu_mul_fp8_quant_deep_gemm.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import random import pytest import torch from vllm.model_executor.layers.fused_moe.batched_deep_gemm_moe import ( persistent_masked_m_silu_mul_quant, ) from vllm.platforms import current_platform from vllm.utils.deep_gemm import DeepGemmQuantScaleFMT, has_deep_gemm from vllm.utils.math_utils import cdiv, round_up if current_platform.is_fp8_fnuz(): pytest.skip( "Tests in this file require float8_e4m3fn and platform does not support", allow_module_level=True, ) fp8_dtype = torch.float8_e4m3fn CASES = [ (1, 1, 128, fp8_dtype), (1, 4, 128 * 1, fp8_dtype), (2, 4, 128 * 2, fp8_dtype), (1, 4, 128 * 3, fp8_dtype), (8, 16, 128 * 4, fp8_dtype), (8, 16, 128 * 5, fp8_dtype), (8, 16, 128 * 6, fp8_dtype), (8, 16, 128 * 7, fp8_dtype), (8, 16, 128 * 8, fp8_dtype), (8, 16, 128 * 9, fp8_dtype), (8, 64, 7168, fp8_dtype), (8, 128, 128 * 33, fp8_dtype), (1, 4, 128 * 10, fp8_dtype), (8, 128, 7168, fp8_dtype), (8, 512, 7168, fp8_dtype), (8, 1024, 7168, fp8_dtype), (17, 31, 768, fp8_dtype), (32, 64, 256, fp8_dtype), (256, 8, 7168, fp8_dtype), (256, 32, 7168, fp8_dtype), (256, 64, 7168, fp8_dtype), # Only add a few fnuz tests to help with long CI times. 
(8, 512, 7168, torch.float8_e4m3fnuz), (8, 1024, 7168, torch.float8_e4m3fnuz), ] def as_uint8(x) -> torch.Tensor: return ( torch.empty(x.shape, dtype=x.dtype, device=x.device).copy_(x).view(torch.uint8) ) def silu(x: torch.Tensor) -> torch.Tensor: one_f32 = torch.tensor([1.0], device=x.device, dtype=torch.float32) x_f32 = x.to(torch.float32) act_f32 = x_f32 / (one_f32 + torch.exp(-x_f32)) assert act_f32.dtype == torch.float32 return act_f32.to(torch.bfloat16) def do_quant(x: torch.Tensor, group_size: int, ceil_ue8m0: bool): eps_bf16 = torch.tensor([1e-10], device=x.device, dtype=torch.bfloat16) one_bf16 = torch.tensor([1.0], device=x.device, dtype=torch.bfloat16) fp8_max_bf16 = torch.tensor( [torch.finfo(fp8_dtype).max], device=x.device, dtype=torch.bfloat16 ) fp8_min_bf16 = torch.tensor( [torch.finfo(fp8_dtype).min], device=x.device, dtype=torch.bfloat16 ) fp8_max_inv = one_bf16 / fp8_max_bf16 assert fp8_max_inv.dtype == torch.bfloat16 assert x.size(-1) % group_size == 0 num_groups = x.numel() // group_size x_og_shape = x.shape x = x.to(torch.bfloat16) x = x.view((-1, group_size)) amax = x.abs().amax(dim=1).clamp(min=eps_bf16) assert amax.dtype == torch.bfloat16 s = amax * fp8_max_inv if ceil_ue8m0: s = torch.exp2( torch.ceil(torch.log2(s).to(torch.bfloat16)).to(torch.bfloat16) ).to(torch.bfloat16) inv_s = one_bf16 / s inv_s = inv_s.view((num_groups, 1)) xq = torch.clamp(x * inv_s, min=fp8_min_bf16.item(), max=fp8_max_bf16.item()).to( fp8_dtype ) xq = xq.view(x_og_shape) xs = s.view((-1, xq.size(-1) // group_size)) return xq, xs def silu_mul_quant( gate: torch.Tensor, up: torch.Tensor, group_size: int, ceil_ue8m0: bool ) -> tuple[torch.Tensor, torch.Tensor]: assert gate.size(-1) % group_size == 0 assert up.size(-1) % group_size == 0 assert gate.dtype == torch.bfloat16 assert up.dtype == torch.bfloat16 act_bf16 = silu(gate) assert act_bf16.dtype == torch.bfloat16 # act & mul a_m = act_bf16 * up assert a_m.dtype == torch.bfloat16 q, s = do_quant(a_m, group_size, 
ceil_ue8m0) return q, s def pack_scales(x: torch.Tensor, tokens_per_expert: torch.Tensor) -> torch.Tensor: """ pack float32 scales into a int32 tensor """ assert x.dtype == torch.float32 E, T, G = x.size() # Add i32_padding here so we can view it as a i32 tensor later on. i32_padding = round_up(G, 4) - G ref_s_i8 = torch.empty((E, T, G + i32_padding), dtype=torch.uint8, device="cuda") for e in range(E): nt = tokens_per_expert[e].item() ref_s_i8[e, :nt, :G] = x[e, :nt].view(torch.int32) >> 23 ref_s_i32 = ref_s_i8.view(torch.int32) return ref_s_i32 def ref_with_scale_fmt( E: int, T: int, H: int, group_size: int, tokens_per_expert: torch.Tensor, gate: torch.Tensor, up: torch.Tensor, scale_fmt: DeepGemmQuantScaleFMT, ) -> tuple[torch.Tensor, torch.Tensor]: """ The precision types of the operations triggered by this function match closely with the kernel implementation so we compare more accurately. """ scale_dtype = ( torch.int32 if scale_fmt == DeepGemmQuantScaleFMT.UE8M0 else torch.float32 ) ceil_ue8m0 = scale_fmt in [ DeepGemmQuantScaleFMT.UE8M0, DeepGemmQuantScaleFMT.FLOAT32_CEIL_UE8M0, ] ref_q = torch.empty((E, T, H), dtype=fp8_dtype, device="cuda") ref_s_f32 = torch.empty( (E, T, cdiv(H, group_size)), dtype=torch.float32, device="cuda" ) for e in range(E): nt = tokens_per_expert[e].item() if nt == 0: continue ref_q[e, :nt], ref_s_f32[e, :nt] = silu_mul_quant( gate[e, :nt], up[e, :nt], group_size, ceil_ue8m0=ceil_ue8m0 ) if scale_dtype == torch.float32: return ref_q, ref_s_f32 assert scale_dtype == torch.int32 return ref_q, pack_scales(ref_s_f32, tokens_per_expert) def token_random(E, T, H2, tokens_per_expert): """ Initialize each token in a random range so we test a range of scale values. 
""" y = torch.empty((E, T, H2), dtype=torch.bfloat16, device="cuda") for e in range(E): for t in range(tokens_per_expert[e].item()): exp = random.choice(range(1, 20)) y[e, t].uniform_(-(2**exp), 2**exp) return y @pytest.mark.parametrize("E,T,H,fp8_type", CASES) @torch.inference_mode() def test_silu_mul_fp8_quant_deep_gemm(E: int, T: int, H: int, fp8_type: torch.dtype): group_size = 128 current_platform.seed_everything(42) tokens_per_expert = torch.randint( low=0, high=T, size=(E,), dtype=torch.int32, device="cuda", ) # Input tensor of shape (E, T, 2*H) y = token_random(E, T, 2 * H, tokens_per_expert) gate = y[..., :H].to(torch.bfloat16) up = y[..., H:].to(torch.bfloat16) scale_fmts = [ DeepGemmQuantScaleFMT.FLOAT32, DeepGemmQuantScaleFMT.FLOAT32_CEIL_UE8M0, DeepGemmQuantScaleFMT.UE8M0, ] # Run the SiLU V2 kernel for scale_fmt in scale_fmts: y_q, y_s = persistent_masked_m_silu_mul_quant( y, tokens_per_expert, group_size=group_size, quant_scale_fmt=scale_fmt, ) ref_y_q, ref_y_s = ref_with_scale_fmt( E, T, H, group_size, tokens_per_expert, gate, up, scale_fmt=scale_fmt ) # deepgemm scales transform dg_scales = None if ( has_deep_gemm() and current_platform.has_device_capability(100) and scale_fmt == DeepGemmQuantScaleFMT.UE8M0 ): from deep_gemm import transform_sf_into_required_layout _q, _s = ref_with_scale_fmt( E, T, H, group_size, tokens_per_expert, gate, up, scale_fmt=DeepGemmQuantScaleFMT.FLOAT32_CEIL_UE8M0, ) dg_scales = transform_sf_into_required_layout( sf=_s, mn=_q.size(1), k=_q.size(2), recipe=(1, 128, 128), num_groups=_q.size(0), is_sfa=True, ) expected_scale_dtype = ( torch.int32 if scale_fmt == DeepGemmQuantScaleFMT.UE8M0 else torch.float32 ) assert y_s.dtype == expected_scale_dtype assert ref_y_s.dtype == expected_scale_dtype for e in range(E): nt = tokens_per_expert[e].item() torch.testing.assert_close( y_q[e, :nt].to(torch.float32), ref_y_q[e, :nt].to(torch.float32), ) if scale_fmt == DeepGemmQuantScaleFMT.UE8M0: G = H // group_size y_s_sliced = 
as_uint8(y_s[e]) ref_s_sliced = as_uint8(ref_y_s[e]) torch.testing.assert_close(y_s_sliced[:nt, :G], ref_s_sliced[:nt, :G]) if dg_scales is not None: dg_sliced = as_uint8(dg_scales[e]) torch.testing.assert_close(y_s_sliced[:nt, :G], dg_sliced[:nt, :G]) else: torch.testing.assert_close( y_s[e, :nt], ref_y_s[e, :nt], )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/moe/test_cutlass_moe.py
tests/kernels/moe/test_cutlass_moe.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import copy import dataclasses from math import prod import pytest import torch from vllm import _custom_ops as ops from vllm.config import ParallelConfig, VllmConfig, set_current_vllm_config from vllm.model_executor.layers.fused_moe.config import ( FUSED_MOE_UNQUANTIZED_CONFIG, fp8_w8a8_moe_quant_config, ) from vllm.model_executor.layers.fused_moe.cutlass_moe import ( cutlass_moe_fp8, run_cutlass_moe_fp8, ) from vllm.model_executor.layers.fused_moe.fused_moe import fused_experts, fused_topk from vllm.model_executor.layers.fused_moe.utils import moe_kernel_quantize_input from vllm.platforms import current_platform NUM_EXPERTS = [40, 64] TOP_KS = [6, 8] MNK_FACTORS = [ (2, 1024, 1024), (2, 3072, 1024), (2, 3072, 1536), (7, 3072, 1536), (64, 1024, 1024), (64, 1024, 1536), (64, 3072, 1024), (224, 1024, 1024), (224, 3072, 1024), (224, 3072, 1536), (32768, 1024, 1024), # These sizes trigger wrong answers. 
# (7232, 2048, 5120), # (40000, 2048, 5120), ] vllm_config = VllmConfig(parallel_config=ParallelConfig(pipeline_parallel_size=1)) @dataclasses.dataclass class MOETensors: a: torch.Tensor w1: torch.Tensor w2: torch.Tensor ab_strides1: torch.Tensor c_strides1: torch.Tensor ab_strides2: torch.Tensor c_strides2: torch.Tensor @staticmethod def make_moe_tensors( m: int, k: int, n: int, e: int, dtype: torch.dtype ) -> "MOETensors": a = torch.randn((m, k), device="cuda", dtype=dtype) / 10 w1 = torch.randn((e, 2 * n, k), device="cuda", dtype=dtype) / 10 w2 = torch.randn((e, k, n), device="cuda", dtype=dtype) / 10 ab_strides1 = torch.full((e,), k, device="cuda", dtype=torch.int64) c_strides1 = torch.full((e,), 2 * n, device="cuda", dtype=torch.int64) ab_strides2 = torch.full((e,), n, device="cuda", dtype=torch.int64) c_strides2 = torch.full((e,), k, device="cuda", dtype=torch.int64) return MOETensors( a=a, w1=w1, w2=w2, ab_strides1=ab_strides1, c_strides1=c_strides1, ab_strides2=ab_strides2, c_strides2=c_strides2, ) @dataclasses.dataclass class MOETensors8Bit(MOETensors): # quantized a_q: torch.Tensor | None = None # a -> a_q w1_q: torch.Tensor | None = None # w1 -> w1_q w2_q: torch.Tensor | None = None # w2 -> w2_q a_scale: torch.Tensor | None = None w1_scale: torch.Tensor | None = None w2_scale: torch.Tensor | None = None # dequantized a_d: torch.Tensor | None = None # a -> a_q -> a_d w1_d: torch.Tensor | None = None # w1 -> w1_q -> w1_d w2_d: torch.Tensor | None = None # w2 -> w2_q -> w2_d @staticmethod def make_moe_tensors_8bit( m: int, k: int, n: int, e: int, per_act_token: bool, per_out_channel: bool ) -> "MOETensors8Bit": dtype = torch.half q_dtype = torch.float8_e4m3fn moe_tensors_fp16 = MOETensors.make_moe_tensors(m, k, n, e, dtype) # a -> a_q, w1 -> w1_q, w2 -> w2_q n_b_scales = 2 * n if per_out_channel else 1 k_b_scales = k if per_out_channel else 1 # Get the right scale for tests. 
a_q, a_scale = ops.scaled_fp8_quant( moe_tensors_fp16.a, None, use_per_token_if_dynamic=per_act_token ) w1_q = torch.empty((e, 2 * n, k), device="cuda", dtype=q_dtype) w2_q = torch.empty((e, k, n), device="cuda", dtype=q_dtype) w1_scale = torch.empty((e, n_b_scales, 1), device="cuda", dtype=torch.float32) w2_scale = torch.empty((e, k_b_scales, 1), device="cuda", dtype=torch.float32) for expert in range(e): w1_q[expert], w1_scale[expert] = ops.scaled_fp8_quant( moe_tensors_fp16.w1[expert], use_per_token_if_dynamic=per_out_channel ) w2_q[expert], w2_scale[expert] = ops.scaled_fp8_quant( moe_tensors_fp16.w2[expert], use_per_token_if_dynamic=per_out_channel ) # a_q -> a_d, w1_q -> w1_d, w2_q -> w2_d a_d = a_q.float().mul(a_scale).to(dtype) w1_d = torch.empty_like(moe_tensors_fp16.w1) w2_d = torch.empty_like(moe_tensors_fp16.w2) for expert in range(e): w1_d[expert] = (w1_q[expert].float() * w1_scale[expert]).half() w2_d[expert] = (w2_q[expert].float() * w2_scale[expert]).half() return MOETensors8Bit( a=moe_tensors_fp16.a, w1=moe_tensors_fp16.w1, w2=moe_tensors_fp16.w2, ab_strides1=moe_tensors_fp16.ab_strides1, c_strides1=moe_tensors_fp16.c_strides1, ab_strides2=moe_tensors_fp16.ab_strides2, c_strides2=moe_tensors_fp16.c_strides2, a_q=a_q, w1_q=w1_q, w2_q=w2_q, a_scale=a_scale, w1_scale=w1_scale, w2_scale=w2_scale, a_d=a_d, w1_d=w1_d, w2_d=w2_d, ) def run_with_expert_maps( num_experts: int, num_local_experts: int, **cutlass_moe_kwargs ): def slice_experts(): slice_params = [ "w1_q", "w2_q", "ab_strides1", "ab_strides2", "c_strides1", "c_strides2", ] full_tensors = { k: v for k, v in cutlass_moe_kwargs.items() if k in slice_params and k in cutlass_moe_kwargs } quant_config = cutlass_moe_kwargs["quant_config"] for i in range(0, num_experts, num_local_experts): s, e = i, i + num_local_experts # make expert map expert_map = [-1] * num_experts expert_map[s:e] = list(range(num_local_experts)) expert_map = torch.tensor(expert_map, dtype=torch.int32, device="cuda") # update 
cutlass moe arg with expert_map cutlass_moe_kwargs["expert_map"] = expert_map # update cutlass moe arg tensors for k, t in full_tensors.items(): cutlass_moe_kwargs[k] = t[s:e] new_quant_config = copy.deepcopy(quant_config) new_quant_config._w1.scale = quant_config.w1_scale[s:e] new_quant_config._w2.scale = quant_config.w2_scale[s:e] cutlass_moe_kwargs["quant_config"] = new_quant_config yield cutlass_moe_kwargs out_tensor = torch.zeros_like(cutlass_moe_kwargs["a"]) for kwargs in slice_experts(): out_tensor = out_tensor + cutlass_moe_fp8(**kwargs) return out_tensor def run_8_bit( moe_tensors: MOETensors8Bit, topk_weights: torch.Tensor, topk_ids: torch.Tensor, per_act_token: bool, per_out_ch: bool, num_local_experts: int | None = None, ) -> torch.Tensor: assert not any( [ t is None for t in [ moe_tensors.w1_q, moe_tensors.w2_q, moe_tensors.w1_scale, moe_tensors.w2_scale, moe_tensors.a_scale, ] ] ) quant_config = fp8_w8a8_moe_quant_config( w1_scale=moe_tensors.w1_scale, w2_scale=moe_tensors.w2_scale, per_act_token_quant=per_act_token, per_out_ch_quant=per_out_ch, # Set to moe_tensors.a_scale iff static scales + per tensor. # This is not currently being tested. 
a1_scale=None, ) kwargs = { "a": moe_tensors.a, "w1_q": moe_tensors.w1_q, # type: ignore[union-attr] "w2_q": moe_tensors.w2_q, # type: ignore[union-attr] "topk_weights": topk_weights, "topk_ids": topk_ids, "ab_strides1": moe_tensors.ab_strides1, "ab_strides2": moe_tensors.ab_strides2, "c_strides1": moe_tensors.c_strides1, "c_strides2": moe_tensors.c_strides2, "quant_config": quant_config, } num_experts = moe_tensors.w1.size(0) with_ep = num_local_experts is not None or num_local_experts == num_experts if not with_ep: return cutlass_moe_fp8(**kwargs) assert num_local_experts is not None return run_with_expert_maps( num_experts, num_local_experts, # type: ignore[arg-type] **kwargs, ) @pytest.mark.parametrize("m,n,k", MNK_FACTORS) @pytest.mark.parametrize("e", NUM_EXPERTS) @pytest.mark.parametrize("topk", TOP_KS) @pytest.mark.parametrize("per_act_token", [True, False]) @pytest.mark.parametrize("per_out_ch", [True, False]) @pytest.mark.skipif( (lambda x: x is None or not ops.cutlass_group_gemm_supported(x.to_int()))( current_platform.get_device_capability() ), reason="Grouped gemm is not supported on this GPU type.", ) def test_cutlass_moe_8_bit_no_graph( m: int, n: int, k: int, e: int, topk: int, per_act_token: bool, per_out_ch: bool, monkeypatch, workspace_init, ep_size: int | None = None, ): current_platform.seed_everything(7) monkeypatch.setenv("VLLM_FUSED_MOE_CHUNK_SIZE", "8192") with set_current_vllm_config(vllm_config): mt = MOETensors8Bit.make_moe_tensors_8bit(m, k, n, e, per_act_token, per_out_ch) score = torch.randn((m, e), device="cuda", dtype=torch.half) topk_weights, topk_ids, _ = fused_topk(mt.a, score, topk, renormalize=False) # Note that we are using the dequantized versions of the tensors. # Using a, w1 and w2 directly results in minor output differences. 
quant_config = FUSED_MOE_UNQUANTIZED_CONFIG triton_output = fused_experts( mt.a_d, mt.w1_d, mt.w2_d, topk_weights, topk_ids, quant_config=quant_config ) if ep_size is not None: assert e % ep_size == 0, "Cannot distribute experts evenly" number_local_experts = e // ep_size else: number_local_experts = None cutlass_output = run_8_bit( mt, topk_weights, topk_ids, per_act_token, per_out_ch, number_local_experts ) # Note 5.5 only needed for larger problem sizes, 5 works ok for # the rest. torch.testing.assert_close( triton_output, cutlass_output, atol=5.5e-2, rtol=1e-2 ) @pytest.mark.parametrize("m,n,k", MNK_FACTORS) @pytest.mark.parametrize("e", NUM_EXPERTS) @pytest.mark.parametrize("topk", TOP_KS) @pytest.mark.parametrize("per_act_token", [True, False]) @pytest.mark.parametrize("per_out_ch", [True, False]) @pytest.mark.skipif( (lambda x: x is None or not ops.cutlass_group_gemm_supported(x.to_int()))( current_platform.get_device_capability() ), reason="Grouped gemm is not supported on this GPU type.", ) def test_cutlass_moe_8_bit_cuda_graph( m: int, n: int, k: int, e: int, topk: int, per_act_token: bool, per_out_ch: bool, monkeypatch, workspace_init, ): current_platform.seed_everything(7) monkeypatch.setenv("VLLM_FUSED_MOE_CHUNK_SIZE", "8192") with set_current_vllm_config(vllm_config): dtype = torch.half mt = MOETensors8Bit.make_moe_tensors_8bit(m, k, n, e, per_act_token, per_out_ch) score = torch.randn((m, e), device="cuda", dtype=dtype) topk_weights, topk_ids, _ = fused_topk(mt.a, score, topk, renormalize=False) # Note that we are using the dequantized versions of the tensors. # Using a, w1 and w2 directly results in minor output differences. 
quant_config = FUSED_MOE_UNQUANTIZED_CONFIG triton_output = fused_experts( mt.a_d, mt.w1_d, mt.w2_d, topk_weights, topk_ids, quant_config=quant_config ) stream = torch.cuda.Stream() graph = torch.cuda.CUDAGraph() with torch.cuda.graph(graph, stream=stream): cutlass_output = run_8_bit( mt, topk_weights, topk_ids, per_act_token, per_out_ch ) torch.cuda.synchronize() graph.replay() torch.cuda.synchronize() torch.testing.assert_close(triton_output, cutlass_output, atol=9e-2, rtol=1e-2) @pytest.mark.parametrize("m", [64]) @pytest.mark.parametrize("n", [1024]) @pytest.mark.parametrize("k", [4096]) @pytest.mark.parametrize("e", [16]) @pytest.mark.parametrize("topk", [1, 8]) @pytest.mark.parametrize("per_act_token", [True]) @pytest.mark.parametrize("per_out_channel", [True]) @pytest.mark.parametrize("ep_size", [1, 2, 4, 8, 16]) @pytest.mark.skipif( (lambda x: x is None or not ops.cutlass_group_gemm_supported(x.to_int()))( current_platform.get_device_capability() ), reason="Grouped gemm is not supported on this GPU type.", ) def test_cutlass_moe_8_bit_EP( m: int, n: int, k: int, e: int, topk: int, per_act_token: bool, per_out_channel: bool, ep_size: int, monkeypatch, workspace_init, ): test_cutlass_moe_8_bit_no_graph( m, n, k, e, topk, per_act_token, per_out_channel, monkeypatch, workspace_init, ep_size, ) LARGE_MNK_FACTORS = [ (1, 8192, 5120, 31), (32768, 1024, 1024, 16), (65536, 512, 1024, 16), ] @pytest.mark.parametrize("m,n,k,topk", LARGE_MNK_FACTORS) @pytest.mark.parametrize("e", [128]) @pytest.mark.parametrize("per_act_token", [False]) @pytest.mark.parametrize("per_out_channel", [True]) @pytest.mark.parametrize("ep_size", [8]) @pytest.mark.skipif( (lambda x: x is None or not ops.cutlass_group_gemm_supported(x.to_int()))( current_platform.get_device_capability() ), reason="Grouped gemm is not supported on this GPU type.", ) def test_cutlass_moe_8_bit_EP_large( m: int, n: int, k: int, e: int, topk: int, per_act_token: bool, per_out_channel: bool, ep_size: int, 
monkeypatch, workspace_init, ): test_cutlass_moe_8_bit_no_graph( m, n, k, e, topk, per_act_token, per_out_channel, monkeypatch, workspace_init, ep_size, ) @pytest.mark.parametrize("m,n,k,topk", [(1, 8192, 5120, 31)]) @pytest.mark.parametrize("e", [128]) @pytest.mark.parametrize("per_act_token", [False]) @pytest.mark.parametrize("per_out_channel", [True]) @pytest.mark.parametrize("ep_size", [8]) @pytest.mark.skipif( (lambda x: x is None or not ops.cutlass_group_gemm_supported(x.to_int()))( current_platform.get_device_capability() ), reason="Grouped gemm is not supported on this GPU type.", ) def test_run_cutlass_moe_fp8( m: int, n: int, k: int, e: int, topk: int, per_act_token: bool, per_out_channel: bool, ep_size: int, workspace_init, ): current_platform.seed_everything(7) with set_current_vllm_config(vllm_config): mt = MOETensors8Bit.make_moe_tensors_8bit( m, k, n, e, per_act_token, per_out_channel ) score = torch.randn((m, e), device="cuda", dtype=torch.half) topk_weights, topk_ids, _ = fused_topk(mt.a, score, topk, renormalize=False) # we want to make sure there is at least one token that's generated in # this expert shard and at least one token that's NOT generated in this # expert shard topk_ids[0][0] = -1 topk_ids[0][1] = 1 workspace13_shape = (m * topk, max(2 * n, k)) workspace2_shape = (m * topk, max(n, k)) output_shape = (m, k) workspace13 = torch.empty( prod(workspace13_shape), device="cuda", dtype=mt.a.dtype ) workspace2 = torch.empty( prod(workspace2_shape), device="cuda", dtype=mt.a.dtype ) num_local_experts = e // ep_size start, end = 0, num_local_experts expert_map = [-1] * e expert_map[start:end] = list(range(num_local_experts)) expert_map = torch.tensor(expert_map, dtype=torch.int32, device="cuda") ab_strides1 = torch.full((e,), k, device="cuda", dtype=torch.int64) ab_strides2 = torch.full((e,), n, device="cuda", dtype=torch.int64) c_strides1 = torch.full((e,), 2 * n, device="cuda", dtype=torch.int64) c_strides2 = torch.full((e,), k, device="cuda", 
dtype=torch.int64) activation = lambda o, i: torch.ops._C.silu_and_mul(o, i) a1q, a1q_scale = moe_kernel_quantize_input( mt.a, mt.a_scale, torch.float8_e4m3fn, per_act_token ) global_num_experts = -1 if mt.w1_q is None else mt.w1_q.size(0) func = lambda output: run_cutlass_moe_fp8( output, a1q, mt.w1_q, mt.w2_q, topk_ids, activation, global_num_experts, expert_map, mt.w1_scale, mt.w2_scale, a1q_scale, None, ab_strides1, ab_strides2, c_strides1, c_strides2, workspace13, workspace2, None, mt.a.dtype, per_act_token, per_out_channel, False, topk_weights, ) workspace13.random_() output_random_workspace = torch.empty( output_shape, device="cuda", dtype=mt.a.dtype ) func(output_random_workspace) workspace13.fill_(0) output_zero_workspace = torch.zeros( output_shape, device="cuda", dtype=mt.a.dtype ) func(output_zero_workspace) torch.testing.assert_close( output_random_workspace, output_zero_workspace, atol=5e-3, rtol=1e-3 )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/moe/test_moe.py
tests/kernels/moe/test_moe.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """Tests for the MOE layers. Run `pytest tests/kernels/test_moe.py`. """ import functools import importlib import sys from collections.abc import Callable from dataclasses import dataclass from typing import Any import pytest import torch from torch.nn import Parameter from torch.nn import functional as F from transformers import MixtralConfig from transformers.models.mixtral.modeling_mixtral import MixtralSparseMoeBlock import vllm.model_executor.layers.fused_moe # noqa from tests.kernels.moe.utils import fused_moe from tests.kernels.utils import opcheck, stack_and_dev, torch_experts, torch_moe from vllm._aiter_ops import rocm_aiter_ops from vllm.config import VllmConfig, set_current_vllm_config from vllm.distributed.parallel_state import init_distributed_environment from vllm.forward_context import set_forward_context from vllm.model_executor.layers.fused_moe.config import ( FUSED_MOE_UNQUANTIZED_CONFIG, int4_w4a16_moe_quant_config, int8_w8a16_moe_quant_config, ) from vllm.model_executor.layers.fused_moe.fused_marlin_moe import ( batched_fused_marlin_moe, fused_marlin_moe, ) from vllm.model_executor.layers.fused_moe.fused_moe import ( fused_topk, modular_triton_fused_moe, ) from vllm.model_executor.layers.fused_moe.moe_torch_iterative import ( fused_moe as iterative_moe, ) from vllm.model_executor.layers.quantization.utils.marlin_utils import ( marlin_permute_bias, ) from vllm.model_executor.layers.quantization.utils.marlin_utils_fp4 import ( rand_marlin_weight_mxfp4_like, rand_marlin_weight_nvfp4_like, ) from vllm.model_executor.layers.quantization.utils.marlin_utils_fp8 import ( marlin_quant_fp8_torch, ) from vllm.model_executor.layers.quantization.utils.marlin_utils_test import ( awq_marlin_quantize, marlin_quantize, ) from vllm.model_executor.layers.quantization.utils.quant_utils import quantize_weights from vllm.model_executor.models.mixtral import 
MixtralMoE from vllm.platforms import current_platform from vllm.scalar_type import ScalarType, scalar_types from vllm.v1.worker.workspace import init_workspace_manager NUM_EXPERTS = [8, 64, 192] EP_SIZE = [1, 4] TOP_KS = [2, 6] MOE_MARLIN_QUANT_TEST_CONFIGS = [ # AWQ-INT4 {"b_type": scalar_types.uint4, "group_blocks": [-1, 2, 4, 8]}, # GPTQ-INT4 { "b_type": scalar_types.uint4b8, "support_act_order": True, "group_blocks": [-1, 2, 4, 8], }, # GPTQ-INT8 { "b_type": scalar_types.uint8b128, "support_act_order": True, "group_blocks": [-1, 2, 4, 8], }, # FP8 {"b_type": scalar_types.float8_e4m3fn, "group_blocks": [-1, 8]}, # NVFP4 {"b_type": scalar_types.float4_e2m1f, "group_blocks": [1]}, # MXFP4 { "a_type": [scalar_types.bfloat16], "b_type": scalar_types.float4_e2m1f, "group_blocks": [2], }, # AWQ-INT4 with INT8 activation { "a_type": [scalar_types.int8], "b_type": scalar_types.uint4, "group_blocks": [-1, 2, 4, 8], }, # GPTQ-INT4 with INT8 activation { "a_type": [scalar_types.int8], "b_type": scalar_types.uint4b8, "group_blocks": [-1, 2, 4, 8], }, # GPTQ-INT4 with FP8 activation { "a_type": [scalar_types.float8_e4m3fn], "b_type": scalar_types.uint4b8, "group_blocks": [-1, 2, 4, 8], }, # AWQ-INT4 with FP8 activation { "a_type": [scalar_types.float8_e4m3fn], "b_type": scalar_types.uint4, "group_blocks": [-1, 2, 4, 8], }, # MXFP4 with FP8 activation { "a_type": [scalar_types.float8_e4m3fn], "b_type": scalar_types.float4_e2m1f, "c_type": [scalar_types.bfloat16], "group_blocks": [2], }, ] FUSED_MOE_MNK_FACTORS = [ (1, 128, 128), (1, 2048, 128), (33, 2048, 128), (32768, 2048, 511), (40000, 1024, 1024), ] FUSED_MOE_WN16_MNK_FACTORS = [ (1, 128, 128), (1, 1024, 1024), (32, 2048, 128), (222, 2048, 1024), ] vllm_config = VllmConfig() def run_moe_test( baseline: Callable | torch.Tensor, moe_fn: Callable, a: torch.Tensor, w1: torch.Tensor, w2: torch.Tensor, score: torch.Tensor, topk: int, global_num_experts: int = -1, expert_map: torch.Tensor | None = None, padding: bool = False, 
use_compile: bool = False, use_cudagraph: bool = False, atol: float = 2e-2, rtol: float = 0, ) -> torch.Tensor: if isinstance(baseline, torch.Tensor): baseline_output = baseline else: baseline_output = baseline( a, w1, w2, score, topk, global_num_experts=global_num_experts, expert_map=expert_map, ) # Pad the weight if moe padding is enabled if padding: w1 = F.pad(w1, (0, 128), "constant", 0)[..., 0:-128] w2 = F.pad(w2, (0, 128), "constant", 0)[..., 0:-128] if use_compile: moe_fn = torch.compile(moe_fn, backend="inductor", fullgraph=True) torch._dynamo.mark_dynamic(a, 0) torch._dynamo.mark_dynamic(score, 0) test_output = moe_fn( a, w1, w2, score, topk, global_num_experts=global_num_experts, expert_map=expert_map, ) if use_cudagraph: test_output.fill_(0) stream = torch.cuda.Stream() graph = torch.cuda.CUDAGraph() with torch.cuda.graph(graph, stream=stream): test_output = moe_fn( a, w1, w2, score, topk, global_num_experts=global_num_experts, expert_map=expert_map, ) torch.cuda.synchronize() graph.replay() torch.cuda.synchronize() torch.testing.assert_close(test_output, baseline_output, atol=atol, rtol=rtol) return baseline_output @pytest.mark.parametrize("m,n,k", FUSED_MOE_MNK_FACTORS) @pytest.mark.parametrize("e", NUM_EXPERTS) @pytest.mark.parametrize("topk", TOP_KS) @pytest.mark.parametrize("ep_size", EP_SIZE) @pytest.mark.parametrize("dtype", [torch.bfloat16]) @pytest.mark.parametrize("padding", [True, False]) @pytest.mark.parametrize("chunk_size", [8192]) def test_fused_moe( m: int, n: int, k: int, e: int, topk: int, ep_size: int, dtype: torch.dtype, padding: bool, chunk_size: int, monkeypatch, workspace_init, ): current_platform.seed_everything(7) monkeypatch.setenv("VLLM_FUSED_MOE_CHUNK_SIZE", str(chunk_size)) # # Setup test data # # # Setup test data # a = torch.randn((m, k), device="cuda", dtype=dtype) / 10 w1 = torch.randn((e, 2 * n, k), device="cuda", dtype=dtype) / 10 w2 = torch.randn((e, k, n), device="cuda", dtype=dtype) / 10 score = torch.randn((m, e), 
device="cuda", dtype=dtype) if ep_size > 1: local_e = e // ep_size e_ids = torch.randint(0, e, (local_e,), device="cuda", dtype=torch.int32) e_map = torch.full((e,), -1, device="cuda", dtype=torch.int32) e_map[e_ids] = torch.arange(local_e, device="cuda", dtype=torch.int32) w1 = w1[e_ids] w2 = w2[e_ids] else: e_map = None # # Setup test functions # quant_config = FUSED_MOE_UNQUANTIZED_CONFIG m_fused_moe_fn = modular_triton_fused_moe(quant_config) def m_fused_moe( a: torch.Tensor, w1: torch.Tensor, w2: torch.Tensor, score: torch.Tensor, topk: int, global_num_experts: int = -1, expert_map: torch.Tensor | None = None, ) -> torch.Tensor: topk_weights, topk_ids, _ = fused_topk(a, score, topk, False) return m_fused_moe_fn( a, w1, w2, topk_weights, topk_ids, global_num_experts=global_num_experts, expert_map=expert_map, ) fused_moe_fn = functools.partial(fused_moe, renormalize=False) # # Run tests # runner = functools.partial( run_moe_test, a=a, w1=w1, w2=w2, score=score, topk=topk, global_num_experts=e, expert_map=e_map, padding=padding, ) # Note: for now use_compile will error out if the problem size is # large enough to trigger chunking. I'm leaving the flag and # setup code in case we are able to revisit this later. 
use_compile = False use_cudagraph = n >= 1024 and k >= 1024 and current_platform.is_cuda_alike() with set_current_vllm_config(vllm_config): baseline_output = runner(torch_moe, iterative_moe) runner( baseline_output, fused_moe_fn, use_compile=use_compile, use_cudagraph=use_cudagraph, ) runner( baseline_output, m_fused_moe, use_compile=use_compile, use_cudagraph=use_cudagraph, ) @pytest.mark.parametrize("m,n,k", FUSED_MOE_WN16_MNK_FACTORS) @pytest.mark.parametrize("e", NUM_EXPERTS) @pytest.mark.parametrize("topk", TOP_KS) @pytest.mark.parametrize("ep_size", EP_SIZE) @pytest.mark.parametrize("dtype", [torch.bfloat16]) @pytest.mark.parametrize("group_size", [64, 128]) @pytest.mark.parametrize("has_zp", [True, False]) @pytest.mark.parametrize("weight_bits", [4, 8]) def test_fused_moe_wn16( m: int, n: int, k: int, e: int, topk: int, ep_size: int, dtype: torch.dtype, group_size: int, has_zp: bool, weight_bits: int, ): a = torch.randn((m, k), device="cuda", dtype=dtype) / 10 w1 = torch.randn((e, 2 * n, k), device="cuda", dtype=dtype) / 10 w2 = torch.randn((e, k, n), device="cuda", dtype=dtype) / 10 score = torch.randn((m, e), device="cuda", dtype=dtype) if weight_bits == 4: pack_factor = 2 quant_type = scalar_types.uint4 if has_zp else scalar_types.uint4b8 elif weight_bits == 8: pack_factor = 1 quant_type = scalar_types.uint8 if has_zp else scalar_types.uint8b128 w1_ref = w1.clone() w2_ref = w2.clone() w1_qweight = torch.empty( (e, 2 * n, k // pack_factor), device="cuda", dtype=torch.uint8 ) w2_qweight = torch.empty((e, k, n // pack_factor), device="cuda", dtype=torch.uint8) w1_scales = torch.empty((e, 2 * n, k // group_size), device="cuda", dtype=dtype) w2_scales = torch.empty((e, k, n // group_size), device="cuda", dtype=dtype) w1_qzeros = torch.empty( (e, 2 * n // pack_factor, k // group_size), device="cuda", dtype=torch.uint8 ) w2_qzeros = torch.empty( (e, k // pack_factor, n // group_size), device="cuda", dtype=torch.uint8 ) for i in range(e * 2): expert_id = i % e if 
i // e == 0: w, w_ref, w_qweight, w_scales, w_qzeros = ( w1, w1_ref, w1_qweight, w1_scales, w1_qzeros, ) else: w, w_ref, w_qweight, w_scales, w_qzeros = ( w2, w2_ref, w2_qweight, w2_scales, w2_qzeros, ) weight, qweight, scales, qzeros = quantize_weights( w[expert_id].T, quant_type, group_size, has_zp, False ) weight = weight.T qweight = qweight.T.contiguous().to(torch.uint8) scales = scales.T if has_zp: qzeros = qzeros.T.contiguous().to(torch.uint8) if weight_bits == 4: qweight = qweight[:, 1::2] * 16 + qweight[:, ::2] if has_zp: qzeros = qzeros[1::2, :] * 16 + qzeros[::2, :] w_ref[expert_id] = weight w_qweight[expert_id] = qweight w_scales[expert_id] = scales if has_zp: w_qzeros[expert_id] = qzeros if ep_size > 1: local_e = e // ep_size e_ids = torch.randint(0, e, (local_e,), device="cuda", dtype=torch.int32) e_map = torch.full((e,), -1, device="cuda", dtype=torch.int32) e_map[e_ids] = torch.arange(local_e, device="cuda", dtype=torch.int32) w1_ref = w1_ref[e_ids] w2_ref = w2_ref[e_ids] w1_qweight = w1_qweight[e_ids] w2_qweight = w2_qweight[e_ids] w1_scales = w1_scales[e_ids] w2_scales = w2_scales[e_ids] w1_qzeros = w1_qzeros[e_ids] w2_qzeros = w2_qzeros[e_ids] else: e_map = None if weight_bits == 4: quant_config_builder = int4_w4a16_moe_quant_config else: assert weight_bits == 8 quant_config_builder = int8_w8a16_moe_quant_config quant_config = quant_config_builder( w1_scale=w1_scales, w2_scale=w2_scales, w1_zp=w1_qzeros if has_zp else None, w2_zp=w2_qzeros if has_zp else None, block_shape=[0, group_size], ) with set_current_vllm_config(vllm_config): triton_output = fused_moe( a, w1_qweight, w2_qweight, score, topk, renormalize=False, global_num_experts=e, expert_map=e_map, quant_config=quant_config, ) torch_output = torch_moe(a, w1_ref, w2_ref, score, topk, expert_map=e_map) torch.testing.assert_close(triton_output, torch_output, atol=2e-2, rtol=0) @pytest.mark.parametrize("dtype", [torch.bfloat16]) @pytest.mark.parametrize("padding", [True, False]) 
@pytest.mark.parametrize( "use_rocm_aiter", [True, False] if current_platform.is_rocm() else [False] ) @torch.inference_mode() def test_mixtral_moe( dist_init, dtype: torch.dtype, padding: bool, use_rocm_aiter: bool, monkeypatch ): """Make sure our Mixtral MoE implementation agrees with the one from huggingface.""" # clear the cache before every test # Force reload aiter_ops to pick up the new environment variables. if "rocm_aiter_ops" in sys.modules: importlib.reload(rocm_aiter_ops) if use_rocm_aiter: monkeypatch.setenv("VLLM_ROCM_USE_AITER", "1") if dtype == torch.float32: pytest.skip("AITER ROCm test skip for float32") monkeypatch.setenv("RANK", "0") monkeypatch.setenv("LOCAL_RANK", "0") monkeypatch.setenv("WORLD_SIZE", "1") monkeypatch.setenv("MASTER_ADDR", "localhost") monkeypatch.setenv("MASTER_PORT", "12345") init_distributed_environment() init_workspace_manager(torch.cuda.current_device()) # Instantiate our and huggingface's MoE blocks vllm_config.compilation_config.static_forward_context = dict() with set_current_vllm_config(vllm_config), set_forward_context(None, vllm_config): config = MixtralConfig() hf_moe = MixtralSparseMoeBlock(config).to(dtype).to("cuda") vllm_moe = MixtralMoE( num_experts=config.num_local_experts, top_k=config.num_experts_per_tok, hidden_size=config.hidden_size, intermediate_size=config.intermediate_size, params_dtype=dtype, tp_size=1, dp_size=1, ).cuda() # Load the weights vllm_moe.gate.weight.data[:] = hf_moe.gate.weight.data for i in range(config.num_local_experts): weights = ( hf_moe.experts[i].w1.weight.data, hf_moe.experts[i].w3.weight.data, ) vllm_moe.experts.w13_weight[i][:] = torch.cat(weights, dim=0) vllm_moe.experts.w2_weight[i][:] = hf_moe.experts[i].w2.weight.data # Generate input batch of dimensions [batch_size, seq_len, hidden_dim] hf_inputs = torch.randn((1, 64, config.hidden_size)).to(dtype).to("cuda") # vLLM uses 1D query [num_tokens, hidden_dim] vllm_inputs = hf_inputs.flatten(0, 1) # Pad the weight if moe padding 
is enabled if padding: vllm_moe.experts.w13_weight = Parameter( F.pad(vllm_moe.experts.w13_weight, (0, 128), "constant", 0)[ ..., 0:-128 ], requires_grad=False, ) vllm_moe.experts.w2_weight = Parameter( F.pad(vllm_moe.experts.w2_weight, (0, 128), "constant", 0)[..., 0:-128], requires_grad=False, ) torch.cuda.synchronize() torch.cuda.empty_cache() # FIXME (zyongye) fix this after we move self.kernel # assignment in FusedMoE.__init__ vllm_moe.experts.quant_method.process_weights_after_loading(vllm_moe.experts) # Run forward passes for both MoE blocks hf_states, _ = hf_moe.forward(hf_inputs) vllm_states = vllm_moe.forward(vllm_inputs) mixtral_moe_tol = { torch.float32: 1e-3, torch.float16: 1e-3, torch.bfloat16: 1e-2, } if use_rocm_aiter: # The values of rtol and atol are set based on the tests in ROCM AITER package. # https://github.com/ROCm/aiter/blob/dfed377f4be7da96ca2d75ac0761f569676f7240/op_tests/test_moe.py#L174 torch.testing.assert_close( hf_states.flatten(0, 1), vllm_states, rtol=0.01, atol=100 ) else: torch.testing.assert_close( hf_states.flatten(0, 1), vllm_states, rtol=mixtral_moe_tol[dtype], atol=mixtral_moe_tol[dtype], ) def marlin_moe_generate_valid_test_cases(): import itertools m_list = [1, 123, 666] n_list = [128, 1024] k_list = [256, 2048] e_list = [5, 12] topk_list = [2, 3] ep_size_list = [1, 4] act_order_list = [True, False] is_k_full_list = [True, False] all_combinations = itertools.product( MOE_MARLIN_QUANT_TEST_CONFIGS, m_list, n_list, k_list, e_list, topk_list, ep_size_list, act_order_list, is_k_full_list, ) def is_invalid( a_type, b_type, c_type, group_blocks, m, n, k, e, topk, ep_size, act_order, is_k_full, ): group_size = group_blocks if group_blocks <= 0 else group_blocks * 16 if group_size > 0 and k % group_size != 0: return False if act_order and group_size in [-1, k, n]: return False if group_size in [k, n]: return False if not act_order and is_k_full: return False return a_type.size_bits < 16 or a_type is c_type cases = [] for case in 
all_combinations: quant_test_config, m, n, k, _, _, _, act_order, *_ = case if act_order and not quant_test_config.get("support_act_order", False): continue f16_types = [scalar_types.float16] inner_combinations = itertools.product( quant_test_config.get("a_type", f16_types), [quant_test_config["b_type"]], quant_test_config.get("c_type", f16_types), quant_test_config["group_blocks"], ) for sub_case in inner_combinations: if ( sub_case[0] == scalar_types.float8_e4m3fn and current_platform.get_device_capability() not in [89, 120] ): continue args = sub_case + (m, n, k) + case[4:] if is_invalid(*args): cases.append(args) return cases @dataclass class MarlinMoEWeightData: w_ref: torch.Tensor qweight: torch.Tensor scales: torch.Tensor global_scale: torch.Tensor | None a_scales_factor: torch.Tensor | None g_idx: torch.Tensor | None zeros: torch.Tensor | None sort_indices: torch.Tensor | None marlin_bias: torch.Tensor | None @staticmethod def make( w: torch.Tensor, quant_type: ScalarType, group_size: int, act_order: bool | None = None, bias: torch.Tensor | None = None, input_type: ScalarType = None, ) -> "MarlinMoEWeightData": assert w.ndim == 3 has_zp = quant_type in [scalar_types.uint4, scalar_types.uint8] k = w.shape[-1] if input_type == scalar_types.int8: input_dtype = torch.int8 elif input_type == scalar_types.float8_e4m3fn: input_dtype = torch.float8_e4m3fn else: input_dtype = w.dtype w_ref_l: list[torch.Tensor] = [] qweight_l: list[torch.Tensor] = [] scales_l: list[torch.Tensor] = [] global_scale_l: list[torch.Tensor] = [] zeros_l: list[torch.Tensor] = [] g_idx_l: list[torch.Tensor] = [] sort_indices_l: list[torch.Tensor] = [] bias_l: list[torch.Tensor] = [] for i in range(w.shape[0]): if quant_type == scalar_types.float4_e2m1f: if group_size == 16: w_ref, qweight, scales, global_scale = ( rand_marlin_weight_nvfp4_like( w[i], group_size, input_dtype=input_dtype ) ) else: w_ref, qweight, scales = rand_marlin_weight_mxfp4_like( w[i], group_size, 
input_dtype=input_dtype ) global_scale = None w_ref_l.append(w_ref.T) qweight_l.append(qweight) scales_l.append(scales) if global_scale is not None: global_scale_l.append(global_scale) elif quant_type == scalar_types.float8_e4m3fn: w_ref, qweight, scales = marlin_quant_fp8_torch( w[i], group_size, input_dtype=input_dtype ) w_ref_l.append(w_ref.T) qweight_l.append(qweight) scales_l.append(scales) elif has_zp: w_ref, qweight, scales, zeros = awq_marlin_quantize( w[i].transpose(1, 0), quant_type, group_size, input_dtype=input_dtype, ) w_ref_l.append(w_ref.T) qweight_l.append(qweight) scales_l.append(scales) zeros_l.append(zeros) else: test_perm = torch.randperm(k) w_ref, qweight, scales, g_idx, sort_indices, _ = marlin_quantize( w[i].transpose(1, 0), quant_type, group_size, act_order, test_perm, input_dtype=input_dtype, ) w_ref_l.append(w_ref.T) qweight_l.append(qweight) scales_l.append(scales) g_idx_l.append(g_idx) sort_indices_l.append(sort_indices) if bias is not None: bias_l.append(marlin_permute_bias(bias[i])) w_ref = stack_and_dev(w_ref_l) qweight = stack_and_dev(qweight_l).contiguous() scales = stack_and_dev(scales_l) global_scale = stack_and_dev(global_scale_l) if global_scale_l else None g_idx = stack_and_dev(g_idx_l) if g_idx_l else None zeros = stack_and_dev(zeros_l) if zeros_l else None sort_indices = stack_and_dev(sort_indices_l) if sort_indices_l else None marlin_bias = stack_and_dev(bias_l) if bias_l else None a_scales_factor = None if input_type == scalar_types.int8 and group_size != -1: a_scales_factor = 1 / 4096 * scales.max().float() scales = scales / scales.max() * 4096 scales = scales.round().to(torch.int16).view(w.dtype) return MarlinMoEWeightData( w_ref=w_ref, qweight=qweight, scales=scales, global_scale=global_scale, a_scales_factor=a_scales_factor, g_idx=g_idx, zeros=zeros, sort_indices=sort_indices, marlin_bias=marlin_bias, ) @pytest.mark.flaky(reruns=2) @pytest.mark.parametrize( ( "a_type, b_type, c_type, group_blocks," "m, n, k, e, topk, 
ep_size, act_order, is_k_full" ), marlin_moe_generate_valid_test_cases(), ) @pytest.mark.skipif(current_platform.is_rocm(), reason="Skip for rocm") def test_fused_marlin_moe( a_type, b_type, c_type, group_blocks, m, n, k, e, topk, ep_size, act_order, is_k_full, ): torch.cuda.manual_seed(1) group_size = group_blocks if group_blocks <= 0 else group_blocks * 16 if c_type == scalar_types.float16: dtype = torch.float16 elif c_type == scalar_types.bfloat16: dtype = torch.bfloat16 else: raise RuntimeError("unsupported c_type") if a_type == scalar_types.int8: a_dtype = torch.int8 elif a_type == scalar_types.float8_e4m3fn: a_dtype = torch.float8_e4m3fn else: a_dtype = dtype a = torch.randn((m, k), device="cuda", dtype=dtype) / 10 w1 = torch.randn((e, 2 * n, k), device="cuda", dtype=dtype) / 10 w2 = torch.randn((e, k, n), device="cuda", dtype=dtype) / 10 if ep_size > 1: local_e = e // ep_size e_ids = torch.randperm(e, device="cuda", dtype=torch.int32)[:local_e] e_map = torch.full((e,), -1, device="cuda", dtype=torch.int32) e_map[e_ids] = torch.arange(local_e, device="cuda", dtype=torch.int32) w1 = w1[e_ids] w2 = w2[e_ids] else: e_map = None w1_data = MarlinMoEWeightData.make( w=w1, quant_type=b_type, group_size=group_size, act_order=act_order, input_type=a_type, ) w2_data = MarlinMoEWeightData.make( w=w2, quant_type=b_type, group_size=group_size, act_order=act_order, input_type=a_type, ) score = torch.randn((m, e), device="cuda", dtype=dtype) topk_weights, topk_ids, _ = fused_topk(a, score, topk, False) with set_current_vllm_config(vllm_config): score = torch.softmax(score, dim=-1, dtype=torch.float32) topk_weight, topk_ids = torch.topk(score, topk) torch_output = torch_experts( a, w1_data.w_ref, w2_data.w_ref, topk_weight=topk_weight, topk_ids=topk_ids, global_num_experts=e, expert_map=e_map, quant_dtype=a_dtype, per_act_token_quant=True, ) marlin_output = fused_marlin_moe( a, w1_data.qweight, w2_data.qweight, None, None, w1_data.scales, w2_data.scales, score, topk_weights, 
topk_ids, global_num_experts=e, expert_map=e_map, global_scale1=w1_data.global_scale, global_scale2=w2_data.global_scale, g_idx1=w1_data.g_idx, g_idx2=w2_data.g_idx, input_global_scale1=w1_data.a_scales_factor, input_global_scale2=w2_data.a_scales_factor, sort_indices1=w1_data.sort_indices, sort_indices2=w2_data.sort_indices, w1_zeros=w1_data.zeros, w2_zeros=w2_data.zeros, input_dtype=a_dtype, quant_type_id=b_type.id, is_k_full=is_k_full, ) torch.testing.assert_close(marlin_output, torch_output, atol=4e-2, rtol=0) @pytest.mark.flaky(reruns=2) @pytest.mark.skipif(current_platform.is_rocm(), reason="Skip for rocm") @pytest.mark.parametrize("m", [1, 256]) def test_fused_marlin_moe_with_bias(m): torch.cuda.manual_seed(0) e, topk = 32, 4 n, k = 2048, 2048 group_size = 128 act_order = False is_k_full = True quant_type = scalar_types.uint4b8 dtype = torch.half a = torch.randn((m, k), device="cuda", dtype=dtype) / 10 w1 = torch.randn((e, 2 * n, k), device="cuda", dtype=dtype) / 10 w2 = torch.randn((e, k, n), device="cuda", dtype=dtype) / 10 b_bias1 = torch.randn((e, 2 * n), device="cuda", dtype=dtype) / 10 b_bias2 = torch.randn((e, k), device="cuda", dtype=dtype) / 10 w1_data = MarlinMoEWeightData.make( w=w1, quant_type=quant_type, group_size=group_size, act_order=act_order, bias=b_bias1, ) w2_data = MarlinMoEWeightData.make( w=w2, quant_type=quant_type, group_size=group_size, act_order=act_order, bias=b_bias2, ) score = torch.randn((m, e), device="cuda", dtype=dtype) topk_weights, topk_ids, _ = fused_topk(a, score, topk, False) with set_current_vllm_config(vllm_config): torch_output = torch_moe( a, w1_data.w_ref, w2_data.w_ref, score, topk, b_bias1, b_bias2 ) marlin_output = fused_marlin_moe( a, w1_data.qweight, w2_data.qweight, w1_data.marlin_bias, w2_data.marlin_bias, w1_data.scales, w2_data.scales, score, topk_weights, topk_ids, global_num_experts=e, expert_map=None, global_scale1=w1_data.global_scale, global_scale2=w2_data.global_scale, g_idx1=w1_data.g_idx, 
g_idx2=w2_data.g_idx, sort_indices1=w1_data.sort_indices, sort_indices2=w2_data.sort_indices, w1_zeros=w1_data.zeros, w2_zeros=w2_data.zeros, quant_type_id=quant_type.id, is_k_full=is_k_full, ) torch.testing.assert_close(marlin_output, torch_output, atol=5e-2, rtol=0) @pytest.mark.parametrize("ep_size", [1, 2]) def test_moe_align_block_size_opcheck(ep_size): num_experts = 4 block_size = 4 expert_map = None if ep_size != 1: local_num_experts = num_experts // ep_size expert_ids = torch.randint( 0, num_experts, (local_num_experts,), device="cuda", dtype=torch.int32 ) expert_map = torch.full((num_experts,), -1, device="cuda", dtype=torch.int32) expert_map[expert_ids] = torch.arange( local_num_experts, device="cuda", dtype=torch.int32 ) topk_ids = torch.randint(0, num_experts, (3, 4), dtype=torch.int32, device="cuda") max_num_tokens_padded = topk_ids.numel() + num_experts * (block_size - 1) sorted_ids = torch.empty( (max_num_tokens_padded,), dtype=torch.int32, device=topk_ids.device ) sorted_ids.fill_(topk_ids.numel()) max_num_m_blocks = max_num_tokens_padded // block_size expert_ids = torch.empty( (max_num_m_blocks,), dtype=torch.int32, device=topk_ids.device ) num_tokens_post_pad = torch.empty((1), dtype=torch.int32, device=topk_ids.device) opcheck( torch.ops._moe_C.moe_align_block_size, ( topk_ids, num_experts, block_size, sorted_ids, expert_ids, num_tokens_post_pad, expert_map, ), ) def test_batched_moe_align_block_size_opcheck(): max_tokens_per_batch = 512 num_experts = 4 block_size = 16 expert_num_tokens = torch.randint( low=0, high=max_tokens_per_batch, size=(num_experts,), dtype=torch.int32, device="cuda", ) max_num_tokens_padded = num_experts * max(max_tokens_per_batch, block_size) sorted_ids = torch.empty((max_num_tokens_padded,), dtype=torch.int32, device="cuda") assert max_num_tokens_padded % block_size == 0 max_num_m_blocks = max_num_tokens_padded // block_size expert_ids = torch.empty((max_num_m_blocks,), dtype=torch.int32, device="cuda") 
num_tokens_post_pad = torch.empty((1), dtype=torch.int32, device="cuda") opcheck( torch.ops._moe_C.batched_moe_align_block_size, ( max_tokens_per_batch, block_size, expert_num_tokens, sorted_ids, expert_ids, num_tokens_post_pad, ), ) @pytest.mark.parametrize("m", [1, 33, 222]) @pytest.mark.parametrize("topk", TOP_KS) @pytest.mark.parametrize("k", [128, 511, 1024]) @pytest.mark.parametrize("dtype", [torch.float32, torch.bfloat16]) @pytest.mark.skipif(current_platform.is_rocm(), reason="Skip for rocm") def test_moe_sum(m: int, topk: int, k: int, dtype: torch.dtype): input = torch.randn((m, topk, k), device="cuda", dtype=dtype) actual = torch.empty((m, k), device="cuda", dtype=dtype) expected = input.sum(dim=1) torch.ops._moe_C.moe_sum(input, actual) torch.testing.assert_close(actual, expected, atol=2e-2, rtol=0)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
true
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/moe/test_deepep_deepgemm_moe.py
tests/kernels/moe/test_deepep_deepgemm_moe.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ Test DeepEP + DeepGEMM integration DeepGEMM are gemm kernels specialized for the fp8 block-quantized case. """ import dataclasses from contextlib import contextmanager import pytest import torch.distributed from torch.distributed import ProcessGroup from typing_extensions import ParamSpec from vllm.config import VllmConfig, set_current_vllm_config from vllm.forward_context import set_forward_context from vllm.model_executor.layers.fused_moe.config import ( FusedMoEQuantConfig, fp8_w8a8_moe_quant_config, ) from vllm.model_executor.layers.fused_moe.fused_moe import fused_experts from vllm.model_executor.layers.fused_moe.modular_kernel import FusedMoEModularKernel from vllm.platforms import current_platform from vllm.utils.deep_gemm import ( get_mk_alignment_for_contiguous_layout, is_deep_gemm_e8m0_used, is_deep_gemm_supported, ) from vllm.utils.import_utils import has_deep_ep, has_deep_gemm from vllm.v1.worker.workspace import init_workspace_manager from ...utils import multi_gpu_test from .parallel_utils import ProcessGroupInfo, parallel_launch from .utils import make_test_weights if has_deep_ep(): from vllm.model_executor.layers.fused_moe.deepep_ht_prepare_finalize import ( DeepEPHTPrepareAndFinalize, ) from vllm.model_executor.layers.fused_moe.deepep_ll_prepare_finalize import ( DeepEPLLPrepareAndFinalize, ) from .parallel_utils import DeepEPHTArgs, DeepEPLLArgs, make_deepep_a2a if has_deep_gemm(): from vllm.model_executor.layers.fused_moe.batched_deep_gemm_moe import ( BatchedDeepGemmExperts, ) from vllm.model_executor.layers.fused_moe.deep_gemm_moe import DeepGemmExperts requires_deep_ep = pytest.mark.skipif( not has_deep_ep(), reason="Requires deep_ep kernels", ) requires_deep_gemm = pytest.mark.skipif( not is_deep_gemm_supported(), reason="Requires deep_gemm kernels", ) P = ParamSpec("P") @contextmanager def with_dp_metadata(M: int, world_size: int): 
num_tokens_across_dp = torch.tensor([M] * world_size, device="cpu", dtype=torch.int) vllm_config = VllmConfig() vllm_config.parallel_config.data_parallel_size = world_size vllm_config.parallel_config.enable_expert_parallel = True with set_forward_context( None, vllm_config, num_tokens=M, num_tokens_across_dp=num_tokens_across_dp, ): yield def next_power_of_2(x): import math if x == 0: return 1 return 2 ** math.ceil(math.log2(x)) def make_block_quant_fp8_weights( e: int, n: int, k: int, block_size: list[int], ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: """ Return weights w1q, w2q, w1_scale, w2_scale """ (_, w1q, w1_scale, _), (_, w2q, w2_scale, _) = make_test_weights( e, n, k, torch.bfloat16, torch.float8_e4m3fn, block_shape=block_size ) return w1q, w2q, w1_scale, w2_scale @dataclasses.dataclass class TestConfig: topk: int m: int k: int n: int num_experts: int per_act_token_quant: bool block_size: list[int] # configs for testing low-latency kernels low_latency: bool use_fp8_dispatch: bool | None = False @dataclasses.dataclass class TestTensors: rank_tokens: torch.Tensor # all ranks make this many tokens rank_token_scales: torch.Tensor | None topk: torch.Tensor topk_weights: torch.Tensor config: TestConfig @staticmethod def make(config: TestConfig, rank) -> "TestTensors": dtype = torch.bfloat16 topk, m, k = (config.topk, config.m, config.k) fp8_info = torch.finfo(torch.float8_e4m3fn) fp8_max, fp8_min = fp8_info.max, fp8_info.min rank_tokens = ( torch.randn((m, k), device=torch.cuda.current_device(), dtype=dtype) / 10.0 ) rank_tokens = rank_tokens.clamp(min=fp8_min, max=fp8_max) rank_token_scales = None topk_ids = torch.randint( low=0, high=config.num_experts, size=(m, topk), device=torch.cuda.current_device(), ).to(dtype=torch.int64) topk_weights = torch.randn( topk_ids.shape, dtype=torch.float32, device=torch.cuda.current_device() ) return TestTensors( rank_tokens=rank_tokens, rank_token_scales=rank_token_scales, topk=topk_ids, 
topk_weights=topk_weights, config=config, ) def make_ll_modular_kernel( pg: ProcessGroup, pgi: ProcessGroupInfo, max_tokens_per_rank: int, dp_size: int, hidden_size: int, q_dtype: torch.dtype | None, test_config: TestConfig, quant_config: FusedMoEQuantConfig, ) -> FusedMoEModularKernel: assert test_config.low_latency assert test_config.use_fp8_dispatch is not None a2a: DeepEPLLPrepareAndFinalize = make_deepep_a2a( pg=pg, pgi=pgi, dp_size=dp_size, deepep_ht_args=None, deepep_ll_args=DeepEPLLArgs( max_tokens_per_rank=max_tokens_per_rank, hidden_size=hidden_size, num_experts=test_config.num_experts, use_fp8_dispatch=test_config.use_fp8_dispatch, ), q_dtype=q_dtype, block_shape=test_config.block_size, ) fused_experts = BatchedDeepGemmExperts( max_num_tokens=max_tokens_per_rank, num_dispatchers=pgi.world_size // dp_size, quant_config=quant_config, ) mk = FusedMoEModularKernel(prepare_finalize=a2a, fused_experts=fused_experts) return mk def make_ht_modular_kernel( pg: ProcessGroup, pgi: ProcessGroupInfo, dp_size: int, num_local_experts: int, q_dtype: torch.dtype | None, test_config: TestConfig, quant_config: FusedMoEQuantConfig, ) -> FusedMoEModularKernel: assert not test_config.low_latency assert test_config.use_fp8_dispatch is None a2a: DeepEPHTPrepareAndFinalize = make_deepep_a2a( pg=pg, pgi=pgi, dp_size=dp_size, deepep_ht_args=DeepEPHTArgs(num_local_experts=num_local_experts), deepep_ll_args=None, q_dtype=q_dtype, block_shape=test_config.block_size, ) fused_experts = DeepGemmExperts(quant_config) mk = FusedMoEModularKernel(prepare_finalize=a2a, fused_experts=fused_experts) return mk def make_modular_kernel( pg: ProcessGroup, pgi: ProcessGroupInfo, dp_size: int, num_local_experts: int, test_tensors: TestTensors, quant_config: FusedMoEQuantConfig, ) -> FusedMoEModularKernel: q_dtype = torch.float8_e4m3fn test_config = test_tensors.config mk: FusedMoEModularKernel # Make modular kernel if test_config.low_latency: max_tokens_per_rank = max(64, 
next_power_of_2(test_tensors.rank_tokens.size(0))) hidden_size = test_tensors.rank_tokens.size(-1) mk = make_ll_modular_kernel( pg=pg, pgi=pgi, max_tokens_per_rank=max_tokens_per_rank, dp_size=dp_size, hidden_size=hidden_size, q_dtype=q_dtype, test_config=test_config, quant_config=quant_config, ) else: mk = make_ht_modular_kernel( pg, pgi, dp_size, num_local_experts, q_dtype, test_config, quant_config=quant_config, ) return mk def deepep_deepgemm_moe_impl( pg: ProcessGroup, pgi: ProcessGroupInfo, dp_size: int, test_tensors: TestTensors, w1: torch.Tensor, w2: torch.Tensor, w1_scale: torch.Tensor | None, w2_scale: torch.Tensor | None, ) -> torch.Tensor: test_config = test_tensors.config num_experts = test_config.num_experts num_local_experts = w1.size(0) def build_expert_map(): num_local_experts = w1.size(0) expert_map = torch.full((num_experts,), fill_value=-1, dtype=torch.int32) s = pgi.rank * num_local_experts e = s + num_local_experts expert_map[s:e] = torch.tensor(list(range(num_local_experts))) return expert_map.to(device=torch.cuda.current_device(), dtype=torch.int32) quant_config = fp8_w8a8_moe_quant_config( w1_scale=w1_scale, w2_scale=w2_scale, # Low-Latency kernels can't dispatch scales. 
a1_scale=(None if test_config.low_latency else test_tensors.rank_token_scales), block_shape=test_config.block_size, ) # Make modular kernel mk: FusedMoEModularKernel = make_modular_kernel( pg=pg, pgi=pgi, dp_size=dp_size, num_local_experts=num_local_experts, test_tensors=test_tensors, quant_config=quant_config, ) with with_dp_metadata( M=test_tensors.rank_tokens.size(0), world_size=pgi.world_size ): out = mk.forward( hidden_states=test_tensors.rank_tokens, w1=w1, w2=w2, topk_weights=test_tensors.topk_weights, topk_ids=test_tensors.topk, inplace=False, activation="silu", global_num_experts=num_experts, expert_map=build_expert_map(), apply_router_weight_on_input=False, ) return out def triton_impl( a: torch.Tensor, topk_ids: torch.Tensor, topk_weights: torch.Tensor, w1: torch.Tensor, w2: torch.Tensor, w1_scale: torch.Tensor, w2_scale: torch.Tensor, a1_scale: torch.Tensor, block_shape: list[int], ): quant_config = fp8_w8a8_moe_quant_config( w1_scale=w1_scale, w2_scale=w2_scale, a1_scale=a1_scale, block_shape=block_shape, ) return fused_experts( hidden_states=a, w1=w1, w2=w2, topk_weights=topk_weights, topk_ids=topk_ids, inplace=False, quant_config=quant_config, # Make sure this is set to False so we # don't end up comparing the same implementation. 
allow_deep_gemm=False, ) def _test_deepep_deepgemm_moe( pgi: ProcessGroupInfo, dp_size: int, config: TestConfig, w1: torch.Tensor, w2: torch.Tensor, w1_scale: torch.Tensor, w2_scale: torch.Tensor, ): device = torch.device(f"cuda:{pgi.local_rank}") init_workspace_manager(device) current_platform.seed_everything(pgi.rank) w1 = w1.to(device=torch.cuda.current_device()) w2 = w2.to(device=torch.cuda.current_device()) w1_scale = w1_scale.to(device=torch.cuda.current_device()) w2_scale = w2_scale.to(device=torch.cuda.current_device()) pg = torch.distributed.new_group(list(range(pgi.world_size))) test_tensors = TestTensors.make(config, pgi.rank) block_shape = [w1.size(1) // w1_scale.size(1), w1.size(2) // w1_scale.size(2)] with set_current_vllm_config(VllmConfig()): # Reference triton_moe = triton_impl( a=test_tensors.rank_tokens, topk_ids=test_tensors.topk, topk_weights=test_tensors.topk_weights, w1=w1, w2=w2, w1_scale=w1_scale, w2_scale=w2_scale, a1_scale=test_tensors.rank_token_scales, block_shape=block_shape, ) # Slice experts for this rank. 
num_local_experts = config.num_experts // pgi.world_size e_start = num_local_experts * pgi.rank e_end = e_start + num_local_experts w1_ep = w1[e_start:e_end] w2_ep = w2[e_start:e_end] w1_scale_ep = w1_scale[e_start:e_end] w2_scale_ep = w2_scale[e_start:e_end] deepep_moe = deepep_deepgemm_moe_impl( pg, pgi, dp_size, test_tensors, w1_ep, w2_ep, w1_scale_ep, w2_scale_ep, ) torch.testing.assert_close( triton_moe, deepep_moe, atol=6e-2, rtol=6e-2, ) MNKs = [ (8, 128, 128), (8, 128, 512), (3, 1024, 2048), (32, 128, 1024), (45, 512, 2048), (64, 1024, 1024), (129, 128, 256), (129, 1024, 2048), (222, 1024, 2048), ] TOPKS = [2, 6] NUM_EXPERTS = [32] @pytest.mark.parametrize("mnk", MNKs) @pytest.mark.parametrize("num_experts", NUM_EXPERTS) @pytest.mark.parametrize("topk", TOPKS) @pytest.mark.parametrize("world_dp_size", [(2, 1)]) @multi_gpu_test(num_gpus=2) @requires_deep_ep @requires_deep_gemm def test_ht_deepep_deepgemm_moe( mnk: tuple[int, int, int], num_experts: int, topk: int, world_dp_size: tuple[int, int], disable_deepgemm_ue8m0, workspace_init, ): """ Tests for High-Throughput DeepEP + DeepGemm integration. 
""" m, n, k = mnk current_platform.seed_everything(7) if topk > num_experts: pytest.skip(f"Skipping test: topk={topk} > E={num_experts}") block_m = get_mk_alignment_for_contiguous_layout()[0] block_size = [block_m, block_m] world_size, dp_size = world_dp_size config = TestConfig( topk=topk, m=m, k=k, n=n, num_experts=num_experts, per_act_token_quant=False, block_size=block_size, low_latency=False, use_fp8_dispatch=None, ) w1, w2, w1_scale, w2_scale = make_block_quant_fp8_weights( num_experts, n, k, block_size ) parallel_launch( world_size, _test_deepep_deepgemm_moe, dp_size, config, w1, w2, w1_scale, w2_scale, ) MNKs = [ (1, 128, 2560), (2, 128, 2560), (3, 1024, 2560), (32, 128, 2560), (45, 512, 2560), (64, 1024, 2560), (222, 1024, 2560), ] # Fix tests for USE_FP8_DISPATCH=True USE_FP8_DISPATCH = [False] @pytest.mark.parametrize("mnk", MNKs) @pytest.mark.parametrize("num_experts", NUM_EXPERTS) @pytest.mark.parametrize("topk", TOPKS) @pytest.mark.parametrize("use_fp8_dispatch", USE_FP8_DISPATCH) @pytest.mark.parametrize("block_size", [[128, 128]]) @pytest.mark.parametrize("world_dp_size", [(2, 1)]) @multi_gpu_test(num_gpus=2) @requires_deep_ep @requires_deep_gemm def test_ll_deepep_deepgemm_moe( mnk: tuple[int, int, int], num_experts: int, topk: int, use_fp8_dispatch: bool, block_size: list[int], world_dp_size: tuple[int, int], disable_deepgemm_ue8m0, workspace_init, ): """ Tests for Low-Latency DeepEP + DeepGemm integration. 
""" assert not is_deep_gemm_e8m0_used() m, n, k = mnk current_platform.seed_everything(7) if topk > num_experts: pytest.skip(f"Skipping test: topk={topk} > E={num_experts}") world_size, dp_size = world_dp_size config = TestConfig( topk=topk, m=m, k=k, n=n, num_experts=num_experts, per_act_token_quant=False, block_size=block_size, low_latency=True, use_fp8_dispatch=use_fp8_dispatch, ) w1, w2, w1_scale, w2_scale = make_block_quant_fp8_weights( num_experts, n, k, block_size ) parallel_launch( world_size, _test_deepep_deepgemm_moe, dp_size, config, w1, w2, w1_scale, w2_scale, )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/moe/test_triton_moe_ptpc_fp8.py
tests/kernels/moe/test_triton_moe_ptpc_fp8.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project # Adapted from https://github.com/sgl-project/sglang/blob/main/test/srt/test_triton_moe_channel_fp8_kernel.py import itertools import pytest import torch from tests.kernels.moe.utils import fused_moe from vllm import _custom_ops as ops from vllm.config import VllmConfig, set_current_vllm_config from vllm.model_executor.layers.activation import SiluAndMul from vllm.model_executor.layers.fused_moe.config import fp8_w8a8_moe_quant_config from vllm.platforms import current_platform if current_platform.get_device_capability() < (9, 0): pytest.skip("FP8 Triton requires CUDA 9.0 or higher", allow_module_level=True) vllm_config = VllmConfig() if current_platform.is_fp8_fnuz(): pytest.skip( "Tests in this file require float8_e4m3fn and platform does not support", allow_module_level=True, ) def native_w8a8_per_token_matmul(A, B, As, Bs, output_dtype=torch.float16): """Matrix multiplication function that supports per-token input quantization and per-column weight quantization""" A = A.to(torch.float32) B = B.to(torch.float32) assert A.shape[-1] == B.shape[-1], "Dimension mismatch" assert B.ndim == 2 and B.is_contiguous(), "B must be a 2D contiguous tensor" # Reshape input M = A.numel() // A.shape[-1] B = B.t() # Transpose weight matrix N, K = B.shape origin_C_shape = A.shape[:-1] + (K,) A = A.reshape(M, N) # As is per-token [M, 1], Bs is per-column [1, K] C = torch.matmul(A, B) # [M, K] C = As * C * Bs.view(1, -1) # Broadcast per-column scale return C.reshape(origin_C_shape).to(output_dtype) def fp8_mask(a, mask): dtype = a.dtype return a.view(torch.int8)[mask].view(dtype) def torch_w8a8_per_column_moe(a, w1, w2, w1_s, w2_s, score, topk): """This function performs fused moe with per-column int8 quantization using native torch.""" B, D = a.shape # Perform per-token quantization a_q, a_s = ops.scaled_fp8_quant(a, use_per_token_if_dynamic=True) # Repeat tokens to match topk 
a_q = a_q.view(B, -1, D).repeat(1, topk, 1).reshape(-1, D) # Also repeat the scale a_s = a_s.view(B, -1, 1).repeat(1, topk, 1).reshape(-1, 1) # [B*topk, 1] out = torch.zeros(B * topk, w2.shape[1], dtype=a.dtype, device=a.device) # Calculate routing score = torch.softmax(score, dim=-1, dtype=torch.float32) topk_weight, topk_ids = torch.topk(score, topk) topk_weight = topk_weight.view(-1) topk_ids = topk_ids.view(-1) # Process each expert for i in range(w1.shape[0]): mask = topk_ids == i if mask.sum(): # First MLP layer: note that a_s is now per-token inter_out = native_w8a8_per_token_matmul( fp8_mask(a_q, mask), w1[i], fp8_mask(a_s, mask), w1_s[i], output_dtype=a.dtype, ) # Activation function act_out = SiluAndMul().forward_native(inter_out) # Quantize activation output with per-token act_out_q, act_out_s = ops.scaled_fp8_quant( act_out, use_per_token_if_dynamic=True ) # Second MLP layer out[mask] = native_w8a8_per_token_matmul( act_out_q, w2[i], act_out_s, w2_s[i], output_dtype=a.dtype ) # Apply routing weights and sum return ( out.view(B, -1, w2.shape[1]) * topk_weight.view(B, -1, 1).to(out.dtype) ).sum(dim=1) @pytest.fixture(autouse=True, scope="module") def setup_cuda(): """Sets the default CUDA device for all tests in this module.""" torch.set_default_device("cuda") DTYPES = [torch.half, torch.bfloat16] M = [1, 33] N = [128, 1024] K = [256, 4096] E = [8] TOP_KS = [2, 6] SEEDS = [0] @pytest.mark.parametrize( "M, N, K, E, topk, dtype, seed", itertools.product(M, N, K, E, TOP_KS, DTYPES, SEEDS), ) @torch.inference_mode() def test_w8a8_fp8_fused_moe(M, N, K, E, topk, dtype, seed): torch.manual_seed(seed) # Initialize int8 quantization parameters factor_for_scale = 1e-2 finfo = torch.finfo(torch.float8_e4m3fn) fp8_max = finfo.max fp8_min = finfo.min # Input tensor # M * K a = torch.randn((M, K), dtype=dtype) / 10 # Generate int8 weights w1_fp32 = (torch.rand((E, 2 * N, K), dtype=torch.float32) - 0.5) * 2 w1 = (w1_fp32 * fp8_max).clamp(min=fp8_min, 
max=fp8_max).to(torch.float8_e4m3fn) w2_fp32 = (torch.rand((E, K, N), dtype=torch.float32) - 0.5) * 2 w2 = (w2_fp32 * fp8_max).clamp(min=fp8_min, max=fp8_max).to(torch.float8_e4m3fn) # Generate scale for each column (per-column quantization) w1_s = torch.rand(E, 2 * N, device=w1_fp32.device) * factor_for_scale w2_s = torch.rand(E, K, device=w2_fp32.device) * factor_for_scale score = torch.randn((M, E), dtype=dtype) with set_current_vllm_config(vllm_config): ref_out = torch_w8a8_per_column_moe(a, w1, w2, w1_s, w2_s, score, topk) out = fused_moe( a, w1, w2, score, topk, renormalize=False, quant_config=fp8_w8a8_moe_quant_config( per_act_token_quant=True, w1_scale=w1_s, w2_scale=w2_s, block_shape=None, # Not using block quantization ), ) # Check results rel_diff = torch.mean( torch.abs(out.to(torch.float32) - ref_out.to(torch.float32)) ) / torch.mean(torch.abs(ref_out.to(torch.float32))) assert rel_diff < 0.05
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/moe/test_nvfp4_moe.py
tests/kernels/moe/test_nvfp4_moe.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import pytest import torch from tests.kernels.moe.utils import make_test_weights from tests.kernels.quantization.nvfp4_utils import ( FLOAT4_E2M1_MAX, FLOAT8_E4M3_MAX, dequantize_nvfp4_to_dtype, ) from tests.kernels.utils import torch_moe from vllm import _custom_ops as ops from vllm.config import ParallelConfig, VllmConfig, set_current_vllm_config from vllm.model_executor.layers.fused_moe.config import nvfp4_moe_quant_config from vllm.model_executor.layers.fused_moe.cutlass_moe import cutlass_moe_fp4 from vllm.model_executor.layers.fused_moe.fused_moe import fused_topk from vllm.platforms import current_platform if not current_platform.has_device_capability(100): pytest.skip( "Nvfp4 Requires compute capability of 10 or above.", allow_module_level=True ) MNK_FACTORS = [ (2, 1024, 1024), (2, 1024, 1536), (2, 3072, 1024), (64, 1024, 1024), (64, 3072, 1024), (64, 2048, 1536), (224, 1024, 1024), (224, 1024, 1536), ] @pytest.mark.parametrize("m,n,k", MNK_FACTORS) @pytest.mark.parametrize("e", [40, 64, 256]) @pytest.mark.parametrize("topk", [1, 6, 8]) @pytest.mark.parametrize("dtype", [torch.bfloat16]) @torch.inference_mode() def test_cutlass_fp4_moe_no_graph( m: int, n: int, k: int, e: int, topk: int, dtype: torch.dtype, workspace_init ): current_platform.seed_everything(7) with set_current_vllm_config( VllmConfig(parallel_config=ParallelConfig(pipeline_parallel_size=1)) ): quant_blocksize = 16 a = torch.randn((m, k), device="cuda", dtype=dtype) / 10 (_, w1_q, w1_blockscale, w1_gs), (_, w2_q, w2_blockscale, w2_gs) = ( make_test_weights( e, n, k, in_dtype=dtype, quant_dtype="nvfp4", block_shape=None, # use quant_blocksize? 
per_out_ch_quant=False, ) ) score = torch.randn((m, e), device="cuda", dtype=dtype) topk_weights, topk_ids, _ = fused_topk(a, score, topk, renormalize=False) a1_gs = torch.ones((e,), device="cuda", dtype=torch.float32) a2_gs = torch.ones((e,), device="cuda", dtype=torch.float32) assert w1_gs is not None assert w2_gs is not None assert w1_blockscale is not None assert w2_blockscale is not None quant_config = nvfp4_moe_quant_config( g1_alphas=(1 / w1_gs), g2_alphas=(1 / w2_gs), a1_gscale=a1_gs, a2_gscale=a2_gs, w1_scale=w1_blockscale, w2_scale=w2_blockscale, ) cutlass_output = cutlass_moe_fp4( a=a, w1_fp4=w1_q, w2_fp4=w2_q, topk_weights=topk_weights, topk_ids=topk_ids, quant_config=quant_config, m=m, n=n, k=k, e=e, ) # Reference check: a_global_scale = ( (FLOAT8_E4M3_MAX * FLOAT4_E2M1_MAX) / torch.amax(a.flatten(), dim=-1) ).to(torch.float32) a_fp4, a_scale_interleaved = ops.scaled_fp4_quant(a, a_global_scale) a_in_dtype = dequantize_nvfp4_to_dtype( a_fp4, a_scale_interleaved, a_global_scale, dtype=a.dtype, device=a.device, block_size=quant_blocksize, ) w1_d = torch.empty((e, 2 * n, k), device="cuda", dtype=dtype) w2_d = torch.empty((e, k, n), device="cuda", dtype=dtype) for idx in range(0, e): w1_d[idx] = dequantize_nvfp4_to_dtype( w1_q[idx], w1_blockscale[idx], w1_gs[idx], dtype=dtype, device=w1_q.device, block_size=quant_blocksize, ) w2_d[idx] = dequantize_nvfp4_to_dtype( w2_q[idx], w2_blockscale[idx], w2_gs[idx], dtype=dtype, device=w2_q.device, block_size=quant_blocksize, ) torch_output = torch_moe(a_in_dtype, w1_d, w2_d, score, topk) torch.testing.assert_close(torch_output, cutlass_output, atol=1e-1, rtol=1e-1) if __name__ == "__main__": test_cutlass_fp4_moe_no_graph((2, 1024, 1024), 40, 1, torch.half)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/moe/test_grouped_topk.py
tests/kernels/moe/test_grouped_topk.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """Tests for the MoE grouped topk kernel Run `pytest tests/kernels/moe/test_grouped_topk.py`. """ import pytest import torch from vllm.config import ( CompilationConfig, VllmConfig, get_cached_compilation_config, set_current_vllm_config, ) from vllm.model_executor.layers.fused_moe.fused_moe import ( GroupedTopk, fused_grouped_topk, ) from vllm.platforms import current_platform @pytest.mark.skipif( not current_platform.is_cuda(), reason="This test is skipped on non-CUDA platform." ) @pytest.mark.parametrize("n_token", [1, 33, 64]) @pytest.mark.parametrize("n_hidden", [1024, 2048]) @pytest.mark.parametrize("n_expert", [16]) @pytest.mark.parametrize("topk", [2]) @pytest.mark.parametrize("renormalize", [True, False]) @pytest.mark.parametrize("num_expert_group", [8]) @pytest.mark.parametrize("topk_group", [2]) @pytest.mark.parametrize("scoring_func", ["softmax", "sigmoid"]) @pytest.mark.parametrize("routed_scaling_factor", [1.0, 2.5]) @pytest.mark.parametrize("dtype", [torch.bfloat16, torch.float32]) def test_grouped_topk( monkeypatch: pytest.MonkeyPatch, n_token: int, n_hidden: int, n_expert: int, topk: int, renormalize: bool, num_expert_group: int, topk_group: int, scoring_func: str, routed_scaling_factor: float, dtype: torch.dtype, ): vllm_config = VllmConfig( compilation_config=CompilationConfig(custom_ops=["all", "+grouped_topk"]) ) get_cached_compilation_config.cache_clear() current_platform.seed_everything(0) hidden_states = torch.randn((n_token, n_hidden), dtype=dtype, device="cuda") gating_output = torch.randn((n_token, n_expert), dtype=dtype, device="cuda") e_score_correction_bias = torch.randn( (n_expert,), dtype=torch.float32, device="cuda" ) with set_current_vllm_config(vllm_config), monkeypatch.context() as m: m.setenv("VLLM_USE_FUSED_MOE_GROUPED_TOPK", "0") grouped_topk = GroupedTopk( topk=topk, renormalize=renormalize, 
num_expert_group=num_expert_group, topk_group=topk_group, scoring_func=scoring_func, routed_scaling_factor=routed_scaling_factor, ) assert grouped_topk._forward_method.__name__ == "forward_cuda" baseline_topk_weights, baseline_topk_ids = grouped_topk( hidden_states=hidden_states, gating_output=gating_output, e_score_correction_bias=e_score_correction_bias, ) test_topk_weights, test_topk_ids = fused_grouped_topk( hidden_states=hidden_states, gating_output=gating_output, topk=topk, renormalize=renormalize, num_expert_group=num_expert_group, topk_group=topk_group, scoring_func=scoring_func, routed_scaling_factor=routed_scaling_factor, e_score_correction_bias=e_score_correction_bias, ) if renormalize: torch.testing.assert_close( baseline_topk_weights, test_topk_weights, atol=2e-2, rtol=0 ) torch.testing.assert_close(baseline_topk_ids, test_topk_ids, atol=0, rtol=0)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/moe/test_flashinfer.py
tests/kernels/moe/test_flashinfer.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from dataclasses import dataclass import pytest import torch import vllm.model_executor.layers.fused_moe.modular_kernel as mk from vllm.config import ParallelConfig, VllmConfig, set_current_vllm_config from vllm.model_executor.layers.fused_moe.config import ( FusedMoEQuantConfig, fp8_w8a8_moe_quant_config, ) from vllm.model_executor.layers.fused_moe.fused_moe import fused_experts from vllm.model_executor.layers.quantization.utils.flashinfer_utils import ( apply_flashinfer_per_tensor_scale_fp8, flashinfer_cutlass_moe_fp8, register_moe_scaling_factors, rotate_flashinfer_fp8_moe_weights, swap_w13_to_w31, ) from vllm.model_executor.layers.quantization.utils.fp8_utils import input_to_float8 from vllm.model_executor.models.llama4 import Llama4MoE from vllm.platforms import current_platform try: from vllm.utils.flashinfer import has_flashinfer_cutlass_fused_moe except ImportError: if current_platform.is_rocm(): pytest.skip( "flashinfer not supported for vLLM on ROCm", allow_module_level=True ) if not has_flashinfer_cutlass_fused_moe() or not current_platform.has_device_capability( 90 ): pytest.skip( "Supported for sm >= 90", allow_module_level=True, ) NUM_EXPERTS = [16] TOP_KS = [1] MNK_FACTORS = [ (256, 8192, 5120), (127, 4096, 5120), (10, 8192, 5120), (10, 4096, 5120), (1, 8192, 5120), (1, 4096, 5120), ] vllm_config = VllmConfig(parallel_config=ParallelConfig(pipeline_parallel_size=1)) def quant_fp8_per_tensor_batches(a): num_batches = a.size(0) a_quant = [] a_scales = [] for i in range(num_batches): a_fp8, a_global_sf = input_to_float8(a[i]) a_global_sf = 1.0 / a_global_sf a_quant.append(a_fp8) a_scales.append(a_global_sf) result_a_quant = torch.stack(a_quant) result_a_scales = torch.stack(a_scales) return result_a_quant, result_a_scales @dataclass class TestData: hidden_states: torch.Tensor w13_quantized: torch.Tensor w2_quantized: torch.Tensor a1_scale: 
torch.Tensor a2_scale: torch.Tensor w13_weight_scale: torch.Tensor w2_weight_scale: torch.Tensor layer: torch.nn.Module @staticmethod def make_moe_tensors_8bit( m: int, k: int, n: int, e: int, reorder: bool, activation: str = "silu" ) -> "TestData": is_gated = activation != "relu2_no_mul" hidden_states = torch.randn((m, k), device="cuda", dtype=torch.bfloat16) / 10 w13 = torch.randn( (e, (2 * n) if is_gated else n, k), device="cuda", dtype=torch.bfloat16 ) w2 = torch.randn((e, k, n), device="cuda", dtype=torch.bfloat16) # Scale to fp8 _, a1_scale = input_to_float8(hidden_states) a1_scale = 1.0 / a1_scale a2_scale = torch.scalar_tensor(1.0).to(device="cuda").to(dtype=torch.float32) w13_quantized, w13_weight_scale = quant_fp8_per_tensor_batches(w13) w2_quantized, w2_weight_scale = quant_fp8_per_tensor_batches(w2) layer = torch.nn.Module() layer.w13_weight = w13_quantized.clone() layer.w2_weight = w2_quantized.clone() layer.w13_input_scale = a1_scale layer.w2_input_scale = a2_scale layer.w13_weight_scale = w13_weight_scale layer.w2_weight_scale = w2_weight_scale # Setup dummy config. 
layer.moe_parallel_config = mk.FusedMoEParallelConfig( tp_size=1, pcp_size=1, dp_size=1, ep_size=1, tp_rank=1, pcp_rank=1, dp_rank=1, ep_rank=1, use_ep=False, all2all_backend="naive", ) register_moe_scaling_factors(layer) # flashinfer expects swapped rows for w13 layer.w13_weight.data = swap_w13_to_w31(layer.w13_weight.data) if reorder: rotate_flashinfer_fp8_moe_weights(layer.w13_weight, layer.w2_weight) layer.custom_routing_function = Llama4MoE.custom_routing_function layer.intermediate_size_per_partition = n layer.ep_rank = 0 layer.local_num_experts = e return TestData( hidden_states=hidden_states, w13_quantized=w13_quantized, w2_quantized=w2_quantized, a1_scale=a1_scale, a2_scale=a2_scale, w13_weight_scale=w13_weight_scale, w2_weight_scale=w2_weight_scale, layer=layer, ) @pytest.mark.parametrize("m,n,k", MNK_FACTORS) @pytest.mark.parametrize("e", NUM_EXPERTS) @pytest.mark.parametrize("topk", TOP_KS) def test_flashinfer_per_tensor_moe_fp8_no_graph( m: int, n: int, k: int, e: int, topk: int, monkeypatch, ): if not current_platform.has_device_capability(100): pytest.skip("Test is only supported for sm >= 100") current_platform.seed_everything(7) monkeypatch.setenv("VLLM_FUSED_MOE_CHUNK_SIZE", "8192") with set_current_vllm_config(vllm_config): td = TestData.make_moe_tensors_8bit(m, k, n, e, reorder=True) score = torch.randn((m, e), device="cuda", dtype=torch.bfloat16) topk_weights, topk_ids = Llama4MoE.custom_routing_function( hidden_states=td.hidden_states, gating_output=score, topk=topk, renormalize=False, ) quant_config = fp8_w8a8_moe_quant_config( w1_scale=td.w13_weight_scale, w2_scale=td.w2_weight_scale, a1_scale=td.a1_scale, a2_scale=td.a2_scale, per_act_token_quant=False, ) output = fused_experts( td.hidden_states, td.w13_quantized, td.w2_quantized, topk_weights=topk_weights, topk_ids=topk_ids, inplace=False, activation="silu", global_num_experts=e, expert_map=None, apply_router_weight_on_input=True, quant_config=quant_config, ) flashinfer_output = 
apply_flashinfer_per_tensor_scale_fp8( layer=td.layer, hidden_states=td.hidden_states, router_logits=score, routing_bias=None, global_num_experts=e, top_k=topk, num_expert_group=None, topk_group=None, apply_router_weight_on_input=True, ) torch.testing.assert_close(output, flashinfer_output, atol=5.5e-2, rtol=1e-2) @pytest.mark.parametrize("m,n,k", MNK_FACTORS) @pytest.mark.parametrize("e", NUM_EXPERTS) @pytest.mark.parametrize("topk", TOP_KS) @pytest.mark.parametrize("activation", ["silu", "relu2_no_mul"]) def test_flashinfer_cutlass_moe_fp8_no_graph( m: int, n: int, k: int, e: int, topk: int, activation: str, monkeypatch, workspace_init, ): current_platform.seed_everything(7) monkeypatch.setenv("VLLM_FUSED_MOE_CHUNK_SIZE", "8192") with set_current_vllm_config(vllm_config): td = TestData.make_moe_tensors_8bit( m, k, n, e, reorder=False, activation=activation ) score = torch.randn((m, e), device="cuda", dtype=torch.bfloat16) topk_weights, topk_ids = Llama4MoE.custom_routing_function( hidden_states=td.hidden_states, gating_output=score, topk=topk, renormalize=False, ) quant_config = fp8_w8a8_moe_quant_config( w1_scale=td.w13_weight_scale, g1_alphas=(td.w13_weight_scale * td.a1_scale).squeeze(), w2_scale=td.w2_weight_scale, g2_alphas=(td.w2_weight_scale * td.a2_scale).squeeze(), a1_scale=td.a1_scale, a1_gscale=td.a1_scale, a2_scale=td.a2_scale, a2_gscale=1.0 / td.a2_scale, per_act_token_quant=False, ) output = fused_experts( td.hidden_states, td.w13_quantized, td.w2_quantized, topk_weights=topk_weights, topk_ids=topk_ids, inplace=False, activation=activation, global_num_experts=e, expert_map=None, apply_router_weight_on_input=True, quant_config=quant_config, ) td.layer.dp_size = 1 def get_fused_moe_quant_config(n: torch.nn.Module) -> FusedMoEQuantConfig: return quant_config td.layer.get_fused_moe_quant_config = get_fused_moe_quant_config td.layer.quant_method = td.layer flashinfer_cutlass_output = flashinfer_cutlass_moe_fp8( td.hidden_states, td.layer, topk_weights, 
topk_ids, activation=activation, global_num_experts=e, expert_map=None, apply_router_weight_on_input=True, ) torch.testing.assert_close( output, flashinfer_cutlass_output, atol=5.5e-2, rtol=1e-2 )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/moe/test_batched_deepgemm.py
tests/kernels/moe/test_batched_deepgemm.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import pytest import torch from vllm.model_executor.layers.fused_moe.batched_deep_gemm_moe import ( BatchedDeepGemmExperts, ) from vllm.model_executor.layers.fused_moe.config import fp8_w8a8_moe_quant_config from vllm.model_executor.layers.fused_moe.fused_batched_moe import ( BatchedPrepareAndFinalize, BatchedTritonExperts, ) from vllm.model_executor.layers.fused_moe.modular_kernel import FusedMoEModularKernel from vllm.utils.deep_gemm import calc_diff, is_deep_gemm_supported from .test_deepgemm import make_block_quant_fp8_weights BLOCK_SIZE = [128, 128] @pytest.mark.skipif(not is_deep_gemm_supported(), reason="Requires deep_gemm kernels") @pytest.mark.parametrize("E", [16, 32]) # number of experts @pytest.mark.parametrize("T", [256, 512]) # tokens per expert @pytest.mark.parametrize("K", [128, 256]) # hidden dim @pytest.mark.parametrize("N", [512, 1024]) # intermediate dim per expert @pytest.mark.parametrize("topk", [2, 4]) def test_batched_deepgemm_vs_triton( E: int, T: int, K: int, N: int, topk: int, monkeypatch, workspace_init ): """Compare BatchedDeepGemmExperts to BatchedTritonExperts.""" monkeypatch.setenv("VLLM_USE_DEEP_GEMM", "1") device = "cuda" w1, w2, w1_s, w2_s = make_block_quant_fp8_weights(E, N, K, BLOCK_SIZE) M = E * T # total tokens a = torch.randn(M, K, device=device, dtype=torch.bfloat16) / 10.0 fp8_info = torch.finfo(torch.float8_e4m3fn) a.clamp_(fp8_info.min, fp8_info.max) # random router outputs → top-k indices / weights router_logits = torch.randn(M, E, device=device, dtype=torch.float32) topk_weights, topk_ids = torch.topk(router_logits, k=topk, dim=-1) topk_weights = torch.nn.functional.softmax(topk_weights, dim=-1) # token number for each expert cnt = torch.bincount(topk_ids.flatten(), minlength=E) max_cnt = int(cnt.max().item()) # next power of 2 for max token number max_num_tokens = 1 << (max_cnt - 1).bit_length() prep_finalize = 
BatchedPrepareAndFinalize( max_num_tokens=max_num_tokens, num_local_experts=E, num_dispatchers=1, rank=0, ) quant_config = fp8_w8a8_moe_quant_config( w1_scale=w1_s, w2_scale=w2_s, per_act_token_quant=False, block_shape=BLOCK_SIZE, ) # triton (reference) triton_experts = BatchedTritonExperts( max_num_tokens=max_num_tokens, num_dispatchers=1, quant_config=quant_config, ) mk_triton = FusedMoEModularKernel(prep_finalize, triton_experts) out_triton = mk_triton( hidden_states=a, w1=w1, w2=w2, topk_weights=topk_weights, topk_ids=topk_ids, inplace=False, global_num_experts=E, ) # deepgemm deepgemm_experts = BatchedDeepGemmExperts( max_num_tokens=max_num_tokens, num_dispatchers=1, quant_config=quant_config, ) mk_deepgemm = FusedMoEModularKernel(prep_finalize, deepgemm_experts) out_deepgemm = mk_deepgemm( hidden_states=a, w1=w1, w2=w2, topk_weights=topk_weights, topk_ids=topk_ids, inplace=False, global_num_experts=E, ) diff = calc_diff(out_deepgemm, out_triton) assert diff < 1e-3, f"Output diff too large: {diff}"
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/moe/test_count_expert_num_tokens.py
tests/kernels/moe/test_count_expert_num_tokens.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ Tests compute_expert_num_tokens kernels """ import dataclasses import pytest import torch from vllm.model_executor.layers.fused_moe.utils import count_expert_num_tokens @dataclasses.dataclass class TestTensors: topk_ids: torch.Tensor expert_map: torch.Tensor | None = None def to_device(self, device: str): self.topk_ids = self.topk_ids.to(device=device) if self.expert_map is not None: self.expert_map = self.expert_map.to(device=device) @staticmethod def make( num_tokens: int, num_topk: int, num_experts: int, device: str, topk_ids_dtype: torch.dtype, ) -> "TestTensors": # make topk ids topk_ids = torch.empty((num_tokens, num_topk), device=device, dtype=torch.int64) for x in range(num_tokens): topk_ids[x] = torch.randperm(num_experts)[:num_topk] topk_ids = topk_ids.to(dtype=torch.int64) return TestTensors(topk_ids=topk_ids) def with_ep_rank( self, ep_rank: int, num_global_experts: int, num_local_experts: int, device: str ): # make an expert map expert_map = torch.empty((num_global_experts), device=device, dtype=torch.int32) expert_map.fill_(-1) s = ep_rank * num_local_experts e = s + num_local_experts expert_map[s:e] = torch.tensor(list(range(num_local_experts)), device=device) return TestTensors(topk_ids=self.topk_ids.clone(), expert_map=expert_map) def ref_impl(tt: TestTensors, expert_num_tokens: torch.Tensor): # do the reference in cpu tt.to_device("cpu") expert_ids, counts = tt.topk_ids.unique(return_counts=True) for eid, count in zip(expert_ids, counts): if eid != -1 and tt.expert_map is not None: eid = tt.expert_map[eid] if eid == -1: continue expert_num_tokens[eid] += count def do_test_compute_expert_num_tokens( num_tokens: int, num_topk: int, num_experts: int, ep_size: int, topk_ids_dtype: torch.dtype, ): assert num_topk <= num_experts tt = TestTensors.make( num_tokens, num_topk, num_experts, topk_ids_dtype=topk_ids_dtype, device="cpu" ) 
num_global_experts = num_experts assert num_global_experts % ep_size == 0 num_local_experts = num_global_experts // ep_size for ep_rank in range(ep_size): tt_rank = tt.with_ep_rank(ep_rank, num_global_experts, num_local_experts, "cpu") ref_expert_num_tokens = torch.zeros( (num_local_experts), device="cpu", dtype=torch.int32 ) ref_impl(tt_rank, ref_expert_num_tokens) ref_expert_num_tokens = ref_expert_num_tokens.to("cuda") tt_rank.to_device("cuda") # Test with expert_map triton_expert_num_tokens_w_emap = count_expert_num_tokens( tt_rank.topk_ids, num_local_experts, tt_rank.expert_map ) # Test without expert map topk_ids = tt_rank.expert_map[tt_rank.topk_ids].to(topk_ids_dtype) triton_expert_num_tokens_wo_emap = count_expert_num_tokens( topk_ids, num_local_experts, expert_map=None ) torch.testing.assert_close( ref_expert_num_tokens, triton_expert_num_tokens_w_emap, atol=0, rtol=0 ) torch.testing.assert_close( ref_expert_num_tokens, triton_expert_num_tokens_wo_emap, atol=0, rtol=0 ) @pytest.mark.parametrize("num_tokens", [1, 4, 8, 11, 127, 128, 3333, 7317]) @pytest.mark.parametrize("num_topk", [2, 6, 8]) @pytest.mark.parametrize("num_experts", [64]) @pytest.mark.parametrize("ep_size", [1, 2, 4]) @pytest.mark.parametrize("topk_ids_dtype", [torch.int64]) def test_compute_expert_num_tokens( num_tokens: int, num_topk: int, num_experts: int, ep_size: int, topk_ids_dtype: torch.dtype, ): do_test_compute_expert_num_tokens( num_tokens, num_topk, num_experts, ep_size, topk_ids_dtype ) @pytest.mark.parametrize("numel", list(range(1, 8192, 111))) @pytest.mark.parametrize("num_experts", [32]) @pytest.mark.parametrize("ep_size", [2]) @pytest.mark.parametrize("topk_ids_dtype", [torch.int64]) def test_compute_expert_num_tokens_from_numel( numel: int, num_experts: int, ep_size: int, topk_ids_dtype: torch.dtype ): do_test_compute_expert_num_tokens( num_tokens=numel, num_topk=1, num_experts=num_experts, ep_size=ep_size, topk_ids_dtype=topk_ids_dtype, )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/moe/utils.py
tests/kernels/moe/utils.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import torch import vllm._custom_ops as ops from tests.kernels.quant_utils import per_block_cast_to_int8 from tests.kernels.quantization.nvfp4_utils import FLOAT4_E2M1_MAX, FLOAT8_E4M3_MAX from vllm.model_executor.layers.activation import SiluAndMul from vllm.model_executor.layers.fused_moe import fused_experts, fused_topk from vllm.model_executor.layers.fused_moe.config import FusedMoEQuantConfig from vllm.model_executor.layers.fused_moe.fused_batched_moe import ( BatchedPrepareAndFinalize, BatchedTritonExperts, NaiveBatchedExperts, ) from vllm.model_executor.layers.fused_moe.modular_kernel import FusedMoEModularKernel from vllm.model_executor.layers.fused_moe.utils import moe_kernel_quantize_input from vllm.utils.deep_gemm import per_block_cast_to_fp8 from vllm.utils.math_utils import round_up def triton_moe( a: torch.Tensor, w1: torch.Tensor, w2: torch.Tensor, topk_weight: torch.Tensor, topk_ids: torch.Tensor, w1_scale: torch.Tensor | None = None, w2_scale: torch.Tensor | None = None, a1_scale: torch.Tensor | None = None, a2_scale: torch.Tensor | None = None, quant_dtype: torch.dtype | None = None, per_act_token_quant=False, block_shape: list[int] | None = None, ) -> torch.Tensor: quant_config = FusedMoEQuantConfig.make( quant_dtype, per_act_token_quant=per_act_token_quant, block_shape=block_shape, w1_scale=w1_scale, w2_scale=w2_scale, a1_scale=a1_scale, a2_scale=a2_scale, ) return fused_experts(a, w1, w2, topk_weight, topk_ids, quant_config=quant_config) def batched_moe( a: torch.Tensor, w1: torch.Tensor, w2: torch.Tensor, topk_weight: torch.Tensor, topk_ids: torch.Tensor, w1_scale: torch.Tensor | None = None, w2_scale: torch.Tensor | None = None, a1_scale: torch.Tensor | None = None, a2_scale: torch.Tensor | None = None, quant_dtype: torch.dtype | None = None, per_act_token_quant: bool = False, block_shape: list[int] | None = None, ) -> torch.Tensor: 
max_num_tokens = round_up(a.shape[0], 64) quant_config = FusedMoEQuantConfig.make( quant_dtype, per_act_token_quant=per_act_token_quant, block_shape=block_shape, w1_scale=w1_scale, w2_scale=w2_scale, a1_scale=a1_scale, a2_scale=a2_scale, ) fused_experts = FusedMoEModularKernel( BatchedPrepareAndFinalize( max_num_tokens, num_dispatchers=1, num_local_experts=w1.shape[0], rank=0 ), BatchedTritonExperts( max_num_tokens=max_num_tokens, num_dispatchers=1, quant_config=quant_config, ), ) return fused_experts(a, w1, w2, topk_weight, topk_ids) def naive_batched_moe( a: torch.Tensor, w1: torch.Tensor, w2: torch.Tensor, topk_weight: torch.Tensor, topk_ids: torch.Tensor, w1_scale: torch.Tensor | None = None, w2_scale: torch.Tensor | None = None, a1_scale: torch.Tensor | None = None, a2_scale: torch.Tensor | None = None, quant_dtype: torch.dtype | None = None, per_act_token_quant: bool = False, block_shape: list[int] | None = None, ) -> torch.Tensor: max_num_tokens = round_up(a.shape[0], 64) quant_config = FusedMoEQuantConfig.make( quant_dtype, per_act_token_quant=per_act_token_quant, block_shape=block_shape, w1_scale=w1_scale, w2_scale=w2_scale, a1_scale=a1_scale, a2_scale=a2_scale, ) fused_experts = FusedMoEModularKernel( BatchedPrepareAndFinalize( max_num_tokens, num_dispatchers=1, num_local_experts=w1.shape[0], rank=0 ), NaiveBatchedExperts( max_num_tokens=max_num_tokens, num_dispatchers=1, quant_config=quant_config, ), ) return fused_experts(a, w1, w2, topk_weight, topk_ids) def chunk_scales( scales: torch.Tensor | None, start: int, end: int ) -> torch.Tensor | None: if scales is not None: if scales.numel() == 1: return scales else: return scales[start:end] return None def make_quantized_test_activations( E: int, m: int, k: int, in_dtype: torch.dtype, quant_dtype: torch.dtype | None = None, block_shape: list[int] | None = None, per_act_token_quant: bool = False, ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor | None]: a = torch.randn((E, m, k), device="cuda", 
dtype=in_dtype) / 10 a_q = a a_scale = None if quant_dtype is not None: assert quant_dtype == torch.float8_e4m3fn or quant_dtype == torch.int8, ( "only fp8/int8 supported" ) a_q = torch.zeros_like(a, dtype=quant_dtype) a_scale_l = [None] * E for e in range(E): a_q[e], a_scale_l[e] = moe_kernel_quantize_input( a[e], None, quant_dtype, per_act_token_quant, block_shape ) a_scale = torch.stack(a_scale_l) if not per_act_token_quant and block_shape is None: a_scale = a_scale.view(E, 1, 1) return a, a_q, a_scale def moe_quantize_weights( w: torch.Tensor, w_s: torch.Tensor | None, quant_dtype: torch.dtype | str | None, per_token_quant: bool, block_shape: list[int] | None, ) -> tuple[torch.Tensor, torch.Tensor | None, torch.Tensor | None]: assert ( quant_dtype == torch.float8_e4m3fn or quant_dtype == torch.int8 or quant_dtype == "nvfp4" ), "only fp8/int8/nvfp4 supported" w_gs = None if block_shape is not None: assert not per_token_quant if quant_dtype == torch.int8: w, w_s = per_block_cast_to_int8(w, block_shape) elif quant_dtype == torch.float8_e4m3fn: w, w_s = per_block_cast_to_fp8(w, block_shape) elif quant_dtype == "nvfp4": raise RuntimeError("blocked quantization not supported for nvfp4") else: raise RuntimeError(f"Unsupported quant type {quant_dtype}") else: if quant_dtype == torch.int8: w, w_s = ops.scaled_int8_quant( w, w_s, use_per_token_if_dynamic=per_token_quant ) elif quant_dtype == torch.float8_e4m3fn: w, w_s = ops.scaled_fp8_quant( w, w_s, use_per_token_if_dynamic=per_token_quant ) elif quant_dtype == "nvfp4": assert not per_token_quant w_amax = torch.abs(w).max().to(torch.float32) w_gs = FLOAT8_E4M3_MAX * FLOAT4_E2M1_MAX / w_amax w, w_s = ops.scaled_fp4_quant(w, w_gs) else: raise RuntimeError(f"Unsupported quant type {quant_dtype}") return w, w_s, w_gs def make_test_weight( e: int, rows: int, cols: int, in_dtype: torch.dtype = torch.bfloat16, quant_dtype: torch.dtype | str | None = None, block_shape: list[int] | None = None, per_out_ch_quant: bool = False, ) 
-> tuple[torch.Tensor, torch.Tensor, torch.Tensor | None, torch.Tensor | None]: w_16 = torch.randn((e, rows, cols), device="cuda", dtype=in_dtype) / 15 w_gs = None if quant_dtype is not None: w_l = [None] * e w_s_l = [None] * e w_gs_l = [None] * e for idx in range(e): w_l[idx], w_s_l[idx], w_gs_l[idx] = moe_quantize_weights( w_16[idx], None, quant_dtype, per_out_ch_quant, block_shape ) w = torch.stack(w_l) w_s = torch.stack(w_s_l) if e > 0 and w_gs_l[0] is not None: w_gs = torch.stack(w_gs_l) if w_s.ndim == 2: assert w_s.shape[-1] == 1 w_s = w_s.view(-1, 1, 1) if block_shape is not None: block_n, block_k = block_shape n_tiles = (rows + block_n - 1) // block_n k_tiles = (cols + block_k - 1) // block_k assert w_s.shape == (e, n_tiles, k_tiles) else: w = w_16 w_s = None w_gs = None return w_16, w, w_s, w_gs def make_test_weights( e: int, n: int, k: int, in_dtype: torch.dtype = torch.bfloat16, quant_dtype: torch.dtype | str | None = None, block_shape: list[int] | None = None, per_out_ch_quant: bool = False, make_gate: bool = True, ) -> tuple[ tuple[torch.Tensor, torch.Tensor, torch.Tensor | None, torch.Tensor | None], tuple[torch.Tensor, torch.Tensor, torch.Tensor | None, torch.Tensor | None], ]: return ( make_test_weight( e, (2 if make_gate else 1) * n, k, in_dtype, quant_dtype, block_shape, per_out_ch_quant, ), make_test_weight(e, k, n, in_dtype, quant_dtype, block_shape, per_out_ch_quant), ) def per_token_cast_to_fp8( x: torch.Tensor, block_size: int = 128 ) -> tuple[torch.Tensor, torch.Tensor]: assert x.dim() == 2 m, n = x.shape pad_size = (block_size - (n % block_size)) % block_size x = torch.nn.functional.pad(x, (0, pad_size), value=0) if pad_size > 0 else x x_view = x.view(m, -1, block_size) x_amax = x_view.abs().float().amax(dim=2).view(m, -1).clamp(1e-4) fp8_data = (x_view * (448.0 / x_amax.unsqueeze(2))).to(torch.float8_e4m3fn) return fp8_data.view(m, n + pad_size)[:, :n], (x_amax / 448.0).view(m, -1) def make_test_quant_config( e: int, n: int, k: int, 
in_dtype: torch.dtype, quant_dtype: torch.dtype | str | None = None, per_act_token_quant: bool = False, block_shape: list[int] | None = None, make_gate: bool = True, ) -> tuple[torch.Tensor, torch.Tensor, FusedMoEQuantConfig]: (_, w1, w1_s, w1_gs), (_, w2, w2_s, w2_gs) = make_test_weights( e, n, k, in_dtype, quant_dtype, per_out_ch_quant=per_act_token_quant, block_shape=block_shape, make_gate=make_gate, ) # Hacky/trivial scales for nvfp4. a1_gscale: torch.Tensor | None = None a2_gscale: torch.Tensor | None = None if quant_dtype == "nvfp4": a1_gscale = torch.ones((e,), device="cuda", dtype=torch.float32) a2_gscale = torch.ones((e,), device="cuda", dtype=torch.float32) a1_scale = a1_gscale a2_scale = a2_gscale else: a1_scale = None a2_scale = None return ( w1, w2, FusedMoEQuantConfig.make( quant_dtype, per_act_token_quant=per_act_token_quant, block_shape=block_shape, w1_scale=w1_s, w2_scale=w2_s, a1_gscale=a1_gscale, a2_gscale=a2_gscale, a1_scale=a1_scale, a2_scale=a2_scale, # TODO: make sure this is handled properly g1_alphas=(1 / w1_gs) if w1_gs is not None else None, g2_alphas=(1 / w2_gs) if w2_gs is not None else None, ), ) def fused_moe( hidden_states: torch.Tensor, w1: torch.Tensor, w2: torch.Tensor, score: torch.Tensor, topk: int, renormalize: bool = False, quant_config: FusedMoEQuantConfig | None = None, global_num_experts: int = -1, expert_map: torch.Tensor | None = None, ) -> torch.Tensor: topk_weights, topk_ids, _ = fused_topk( hidden_states, score.float(), topk, renormalize ) return fused_experts( hidden_states, w1, w2, topk_weights, topk_ids, global_num_experts=global_num_experts, expert_map=expert_map, quant_config=quant_config, ) # CustomOp? 
# CustomOp?
class BaselineMM(torch.nn.Module):
    """Reference matmul layer: computes in float32, casts to `out_dtype`.

    Returns a ``(output, None)`` tuple to mimic the return convention of
    vLLM's linear layers, so it can stand in for them in reference MLPs.
    """

    def __init__(
        self,
        b: torch.Tensor,
        out_dtype: torch.dtype,
    ):
        super().__init__()
        # Keep the weight in float32 so the reference path accumulates at
        # full precision regardless of the input dtype.
        self.b = b.to(dtype=torch.float32)
        self.out_dtype = out_dtype

    def forward(self, a: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor | None]:
        # float32 matmul, then cast back; second tuple element mirrors the
        # (output, bias/scale) shape of vLLM linear layers.
        return torch.mm(a.to(dtype=torch.float32), self.b).to(self.out_dtype), None


class TestMLP(torch.nn.Module):
    """Naive gate_up -> SiluAndMul -> down reference MLP built on BaselineMM."""

    def __init__(
        self,
        w1: torch.Tensor,
        w2: torch.Tensor,
        out_dtype: torch.dtype,
    ):
        super().__init__()
        self.gate_up_proj = BaselineMM(w1, out_dtype)
        self.down_proj = BaselineMM(w2, out_dtype)
        self.act_fn = SiluAndMul()

    def forward(self, x):
        x, _ = self.gate_up_proj(x)
        x = self.act_fn(x)
        x, _ = self.down_proj(x)
        return x


def make_naive_shared_experts(
    N: int,
    K: int,
    in_dtype: torch.dtype = torch.bfloat16,
) -> torch.nn.Module:
    """Build a TestMLP with random weights for use as a shared-experts baseline.

    Weights are scaled down by 15 to keep activations in a numerically
    well-behaved range (same convention as the other test-weight helpers).
    """
    w1 = torch.randn((K, N * 2), device="cuda", dtype=in_dtype) / 15
    w2 = torch.randn((N, K), device="cuda", dtype=in_dtype) / 15
    return TestMLP(w1, w2, out_dtype=in_dtype)


class RealMLP(torch.nn.Module):
    """MLP assembled from vLLM's parallel linear layers with preloaded weights.

    Unlike TestMLP this exercises the real quantized linear paths
    (MergedColumnParallelLinear / RowParallelLinear), registering the given
    weights and scales directly instead of loading from a checkpoint.
    """

    def __init__(
        self,
        hidden_size: int,
        intermediate_size: int,
        w1: torch.Tensor,
        w2: torch.Tensor,
        hidden_act: str = "silu",
        quant_config=None,
        reduce_results: bool = True,
        prefix: str = "",
        w1_s: torch.Tensor | None = None,
        w2_s: torch.Tensor | None = None,
    ) -> None:
        # Imported locally to avoid importing vLLM layers at module load time.
        from vllm.model_executor.layers.linear import (
            MergedColumnParallelLinear,
            RowParallelLinear,
        )

        super().__init__()
        self.gate_up_proj = MergedColumnParallelLinear(
            hidden_size,
            [intermediate_size] * 2,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.gate_up_proj",
        )
        # Overwrite the layer-created parameters with the supplied test
        # weights/scales (no checkpoint loading in tests).
        self.gate_up_proj.register_parameter(
            "weight", torch.nn.Parameter(w1, requires_grad=False)
        )
        self.gate_up_proj.register_parameter(
            "weight_scale", torch.nn.Parameter(w1_s, requires_grad=False)
        )
        self.gate_up_proj.register_parameter(
            "input_scale", None
        )  # torch.nn.Parameter(None, requires_grad=False))
        self.down_proj = RowParallelLinear(
            intermediate_size,
            hidden_size,
            bias=False,
            quant_config=quant_config,
            reduce_results=reduce_results,
            prefix=f"{prefix}.down_proj",
        )
        self.down_proj.register_parameter(
            "weight", torch.nn.Parameter(w2, requires_grad=False)
        )
        self.down_proj.register_parameter(
            "weight_scale", torch.nn.Parameter(w2_s, requires_grad=False)
        )
        self.down_proj.register_parameter(
            "input_scale", None
        )  # torch.nn.Parameter(None, requires_grad=False))
        if hidden_act != "silu":
            raise ValueError(
                f"Unsupported activation: {hidden_act}. Only silu is supported for now."
            )
        self.act_fn = SiluAndMul()

    def forward(self, x):
        gate_up, _ = self.gate_up_proj(x)
        x = self.act_fn(gate_up)
        x, _ = self.down_proj(x)
        return x


def make_shared_experts(
    N: int,
    K: int,
    in_dtype: torch.dtype = torch.bfloat16,
    quant_dtype: torch.dtype | str | None = None,
) -> torch.nn.Module:
    """Build a RealMLP shared-experts module, optionally fp8-quantized.

    Uses make_test_weights with a single "expert" (E=1); for fp8 the weights
    and scales are transposed into the layout the vLLM linear layers expect.
    """
    from vllm.model_executor.layers.quantization.fp8 import Fp8Config

    (_, w1, w1_s, _), (_, w2, w2_s, _) = make_test_weights(
        1,
        N,
        K,
        in_dtype=in_dtype,
        quant_dtype=quant_dtype,
    )
    old_dtype = torch.get_default_dtype()
    try:
        # Layer construction creates parameters in the default dtype, so set
        # it for the duration of RealMLP construction and restore afterwards.
        torch.set_default_dtype(in_dtype)
        if quant_dtype == torch.float8_e4m3fn:
            # Drop the expert dim and transpose into (in, out) layout.
            w1 = w1[0].transpose(0, 1)
            w2 = w2[0].transpose(0, 1)
            w1_s = w1_s[0].transpose(0, 1) if w1_s is not None else None
            w2_s = w2_s[0].transpose(0, 1) if w2_s is not None else None
            quant_config = Fp8Config(True)
        else:
            w1 = w1[0]
            w2 = w2[0]
            w1_s = None
            w2_s = None
            quant_config = None
        return RealMLP(K, N, w1, w2, "silu", quant_config, w1_s=w1_s, w2_s=w2_s)
    finally:
        torch.set_default_dtype(old_dtype)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/moe/test_moe_align_block_size.py
tests/kernels/moe/test_moe_align_block_size.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Tests for the MOE align block size function.

Run `pytest tests/kernels/moe/test_moe_align_block_size.py`.
"""

import pytest
import torch

from vllm.model_executor.layers.fused_moe.moe_align_block_size import (
    batched_moe_align_block_size,
    moe_align_block_size,
)
from vllm.platforms import current_platform
from vllm.utils.math_utils import round_up

# Parameter grids for the main test below.
NUM_TOKENS = [1, 3, 256, 2256, 4096]
NUM_EXPERTS = [32, 160, 256, 257]
TOP_KS = [1, 2, 16, 32]
BLOCK_SIZES = [32, 128]

current_platform.seed_everything(0)


def _group_tokens_by_expert(
    sorted_ids: torch.Tensor,
    expert_ids: torch.Tensor,
    block_size: int,
    valid_length: int,
    total_tokens: int,
) -> dict:
    """Map each expert id to the list of valid token ids assigned to it.

    Walks the block-aligned `sorted_ids` layout: block i belongs to
    `expert_ids[i]`; padding entries (>= total_tokens) are filtered out.
    """
    num_blocks = valid_length // block_size
    expert_tokens: dict[int, list[int]] = {}
    for block_idx in range(num_blocks):
        expert_id = expert_ids[block_idx].item()
        block_start = block_idx * block_size
        block_end = min(block_start + block_size, valid_length)
        block_tokens = sorted_ids[block_start:block_end]
        # Entries equal to total_tokens are padding sentinels, not real tokens.
        valid_tokens = block_tokens[block_tokens < total_tokens]
        if expert_id not in expert_tokens:
            expert_tokens[expert_id] = []
        expert_tokens[expert_id].extend(valid_tokens.tolist())
    return expert_tokens


def _verify_expert_level_sorting(
    actual_sorted_ids: torch.Tensor,
    golden_sorted_ids: torch.Tensor,
    expert_ids: torch.Tensor,
    block_size: int,
    valid_length: int,
    total_tokens: int,
):
    """
    Verify that actual_sorted_ids follows the correct expert-level sorting.
    The kernel implementation may or may not preserve original token order in
    topk_ids in the final sorted_ids; however, this does not impact quality.
    """
    # Group tokens by expert from the golden implementation
    golden_expert_tokens = _group_tokens_by_expert(
        golden_sorted_ids, expert_ids, block_size, valid_length, total_tokens
    )
    actual_expert_tokens = _group_tokens_by_expert(
        actual_sorted_ids, expert_ids, block_size, valid_length, total_tokens
    )
    assert set(golden_expert_tokens.keys()) == set(actual_expert_tokens.keys()), (
        f"Expert IDs mismatch: golden={set(golden_expert_tokens.keys())}, "
        f"actual={set(actual_expert_tokens.keys())}"
    )
    # Compare per-expert token sets order-insensitively (sort both sides).
    for expert_id in golden_expert_tokens:
        golden_tokens = torch.tensor(
            golden_expert_tokens[expert_id], device=actual_sorted_ids.device
        )
        actual_tokens = torch.tensor(
            actual_expert_tokens[expert_id], device=actual_sorted_ids.device
        )
        assert torch.equal(
            torch.sort(golden_tokens)[0], torch.sort(actual_tokens)[0]
        ), (
            f"Expert {expert_id} token mismatch: "
            f"golden={golden_expert_tokens[expert_id]}, "
            f"actual={actual_expert_tokens[expert_id]}"
        )


def torch_moe_align_block_size(
    topk_ids: torch.Tensor,
    block_size: int,
    num_experts: int,
    expert_map: torch.Tensor | None = None,
    pad_sorted_ids: bool = False,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """
    Golden torch implementation of moe_align_block_size.

    This function aligns the token distribution across experts to be
    compatible with block size for matrix multiplication by sorting tokens
    by expert and padding to block boundaries.
    """
    # Worst case: every expert needs (block_size - 1) padding slots.
    max_num_tokens_padded = topk_ids.numel() + num_experts * (block_size - 1)
    if pad_sorted_ids:
        max_num_tokens_padded = round_up(max_num_tokens_padded, block_size)
    if topk_ids.numel() < num_experts:
        max_num_tokens_padded = topk_ids.numel() * block_size

    flattened_token_indices = torch.arange(
        topk_ids.numel(), device=topk_ids.device, dtype=torch.int32
    )
    flattened_expert_ids = topk_ids.flatten()
    # Stable sort keeps the original token order within each expert.
    sorted_expert_ids, sort_indices = torch.sort(flattened_expert_ids, stable=True)
    sorted_token_indices = flattened_token_indices[sort_indices]

    # Count of tokens routed to each expert.
    expert_token_counts = torch.zeros(
        num_experts, dtype=torch.int64, device=topk_ids.device
    )
    for expert_id in range(num_experts):
        mask = sorted_expert_ids == expert_id
        expert_token_counts[expert_id] = mask.sum()

    # Per-expert counts rounded up to a multiple of block_size; experts
    # masked out by expert_map (== -1) contribute zero.
    expert_padded_counts = torch.zeros(
        num_experts, dtype=torch.int64, device=topk_ids.device
    )
    for expert_id in range(num_experts):
        original_count = expert_token_counts[expert_id]
        if expert_map is not None and expert_map[expert_id] == -1:
            continue
        if original_count > 0:
            expert_padded_counts[expert_id] = (
                (original_count + block_size - 1) // block_size
            ) * block_size

    # Fill with the sentinel topk_ids.numel() so padding slots are
    # distinguishable from real token indices.
    sorted_token_ids = torch.full(
        (max_num_tokens_padded,),
        topk_ids.numel(),
        dtype=torch.int32,
        device=topk_ids.device,
    )
    max_num_blocks = (max_num_tokens_padded + block_size - 1) // block_size
    expert_ids = torch.zeros(max_num_blocks, dtype=torch.int32, device=topk_ids.device)

    # Lay out each expert's tokens contiguously, block-aligned, and record
    # which (possibly remapped) expert owns each block.
    current_pos = 0
    current_block = 0
    for expert_id in range(num_experts):
        if expert_map is not None and expert_map[expert_id] == -1:
            continue
        expert_mask = sorted_expert_ids == expert_id
        expert_tokens = sorted_token_indices[expert_mask]
        num_expert_tokens = expert_tokens.shape[0]
        if num_expert_tokens > 0:
            sorted_token_ids[current_pos : current_pos + num_expert_tokens] = (
                expert_tokens
            )
            expert_blocks_needed = expert_padded_counts[expert_id] // block_size
            expert_id_new = expert_id
            if expert_map is not None:
                # Translate global expert id to the local (EP) id.
                expert_id_new = expert_map[expert_id]
            expert_ids[current_block : current_block + expert_blocks_needed] = (
                expert_id_new
            )
            current_pos += expert_padded_counts[expert_id]
            current_block += expert_blocks_needed

    total_padded_tokens = expert_padded_counts.sum()
    num_tokens_post_pad = torch.tensor(
        [total_padded_tokens], dtype=torch.int32, device=topk_ids.device
    )
    return sorted_token_ids, expert_ids, num_tokens_post_pad


@pytest.mark.parametrize("m", NUM_TOKENS)
@pytest.mark.parametrize("topk", TOP_KS)
@pytest.mark.parametrize("num_experts", NUM_EXPERTS)
@pytest.mark.parametrize("block_size", BLOCK_SIZES)
@pytest.mark.parametrize("pad_sorted_ids", [False, True])
@pytest.mark.skipif(current_platform.is_rocm(), reason="Skip for rocm")
def test_moe_align_block_size(
    m: int, topk: int, num_experts: int, block_size: int, pad_sorted_ids: bool
):
    """Test moe_align_block_size without expert mapping"""
    # Build topk_ids with distinct experts per token (randperm avoids
    # duplicate experts within a row).
    topk_ids = torch.zeros((m, topk), device="cuda", dtype=torch.int32)
    for i in range(m):
        experts = torch.randperm(num_experts, device="cuda")[:topk]
        topk_ids[i] = experts

    actual_sorted_ids, actual_expert_ids, actual_num_tokens = moe_align_block_size(
        topk_ids=topk_ids,
        block_size=block_size,
        num_experts=num_experts,
        pad_sorted_ids=pad_sorted_ids,
    )
    golden_sorted_ids, golden_expert_ids, golden_num_tokens = (
        torch_moe_align_block_size(
            topk_ids=topk_ids,
            block_size=block_size,
            num_experts=num_experts,
            pad_sorted_ids=pad_sorted_ids,
        )
    )
    torch.testing.assert_close(actual_num_tokens, golden_num_tokens, atol=0, rtol=0)
    torch.testing.assert_close(actual_expert_ids, golden_expert_ids, atol=0, rtol=0)
    # For sorted_token_ids, verify block-level correctness rather than exact
    # order. Tokens within each expert's blocks can be in any order, but
    # expert regions must be correct.
    _verify_expert_level_sorting(
        actual_sorted_ids,
        golden_sorted_ids,
        actual_expert_ids,
        block_size,
        actual_num_tokens.item(),
        m * topk,
    )
    total_tokens = m * topk
    assert actual_num_tokens.item() % block_size == 0, (
        "num_tokens_post_pad should be divisible by block_size"
    )
    assert actual_num_tokens.item() >= total_tokens, (
        "num_tokens_post_pad should be at least total_tokens"
    )
    valid_tokens = actual_sorted_ids[actual_sorted_ids < total_tokens]
    assert len(valid_tokens) == total_tokens, (
        f"Should have exactly {total_tokens} valid tokens, got {len(valid_tokens)}"
    )
    assert (actual_expert_ids >= 0).all() and (actual_expert_ids < num_experts).all(), (
        "expert_ids should contain valid expert indices"
    )


@pytest.mark.parametrize("m", [16, 32, 2048])
@pytest.mark.parametrize("topk", [2, 4])
@pytest.mark.parametrize("num_experts", [8, 64])
@pytest.mark.parametrize("block_size", [64])
@pytest.mark.skipif(current_platform.is_rocm(), reason="Skip for rocm")
def test_moe_align_block_size_with_expert_map(
    m: int, topk: int, num_experts: int, block_size: int
):
    """Test moe_align_block_size with expert mapping (EP scenario)"""
    topk_ids = torch.zeros((m, topk), device="cuda", dtype=torch.int32)
    for i in range(m):
        experts = torch.randperm(num_experts, device="cuda")[:topk]
        topk_ids[i] = experts

    # Only even-numbered experts are local to this rank; the rest map to -1.
    expert_map = torch.full((num_experts,), -1, device="cuda", dtype=torch.int32)
    local_experts = list(range(0, num_experts, 2))
    for i, expert_id in enumerate(local_experts):
        expert_map[expert_id] = i

    actual_sorted_ids, actual_expert_ids, actual_num_tokens = moe_align_block_size(
        topk_ids=topk_ids,
        block_size=block_size,
        num_experts=num_experts,
        expert_map=expert_map,
        ignore_invalid_experts=True,
    )
    golden_sorted_ids, golden_expert_ids, golden_num_tokens = (
        torch_moe_align_block_size(
            topk_ids=topk_ids,
            block_size=block_size,
            num_experts=num_experts,
            expert_map=expert_map,
        )
    )
    torch.testing.assert_close(actual_num_tokens, golden_num_tokens, atol=0, rtol=0)
    torch.testing.assert_close(actual_expert_ids, golden_expert_ids, atol=0, rtol=0)
    _verify_expert_level_sorting(
        actual_sorted_ids,
        golden_sorted_ids,
        actual_expert_ids,
        block_size,
        actual_num_tokens.item(),
        m * topk,
    )


def test_moe_align_block_size_deterministic():
    """Repeated calls with identical input must produce identical output."""
    m, topk, num_experts, block_size = 128, 2, 32, 64

    torch.manual_seed(42)
    topk_ids = torch.randint(
        0, num_experts, (m, topk), device="cuda", dtype=torch.int32
    )

    # expect the results to be reproducible
    results = []
    for _ in range(5):
        sorted_ids, expert_ids, num_tokens = moe_align_block_size(
            topk_ids=topk_ids, block_size=block_size, num_experts=num_experts
        )
        results.append((sorted_ids.clone(), expert_ids.clone(), num_tokens.clone()))

    for i in range(1, len(results)):
        assert torch.equal(results[0][0], results[i][0]), (
            "sorted_ids should be deterministic"
        )
        assert torch.equal(results[0][1], results[i][1]), (
            "expert_ids should be deterministic"
        )
        assert torch.equal(results[0][2], results[i][2]), (
            "num_tokens should be deterministic"
        )


@pytest.mark.parametrize("max_tokens_per_batch", [13, 16, 512])
@pytest.mark.parametrize("num_experts", [8, 16, 32, 64])
@pytest.mark.parametrize("block_size", [8, 16, 32, 64])
@pytest.mark.parametrize("simulate_empty_batches", [False, True])
def test_batched_moe_align_block_size(
    max_tokens_per_batch: int,
    num_experts: int,
    block_size: int,
    simulate_empty_batches: bool,
):
    """Compare batched_moe_align_block_size against a pure-python reference."""

    def ref_outputs(
        expert_num_tokens: torch.Tensor,
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        # Reference computed on CPU, then moved to CUDA for comparison.
        E = expert_num_tokens.size(0)
        # Round up so each batch can be split to blocks evenly.
        Msum = round_up(max_tokens_per_batch, block_size) * E
        ref_sorted_ids = torch.empty((Msum,), dtype=torch.int32)
        ref_expert_ids = torch.empty((Msum // block_size,), dtype=torch.int32)
        ref_num_tokens_post_pad = torch.empty((1,), dtype=torch.int32)

        # Initialize: sentinel for padding slots, -1 for unused blocks.
        sentinel = E * max_tokens_per_batch
        ref_sorted_ids.fill_(sentinel)
        ref_expert_ids.fill_(-1)

        # Fill ref_sorted_ids
        i = 0
        for expert_id, expert_nt in enumerate(expert_num_tokens):
            token_offset = expert_id * max_tokens_per_batch
            for j in range(expert_nt):
                ref_sorted_ids[i] = token_offset + j
                i += 1
            # round up i to the next block_size
            i = round_up(i, block_size)
        ref_num_tokens_post_pad[0] = i

        # Fill expert_ids
        nt_ceil_sum = 0
        for expert_id, expert_nt in enumerate(expert_num_tokens):
            expert_ids_offset = nt_ceil_sum // block_size
            ceil_expert_nt = round_up(int(expert_nt.item()), block_size)
            num_blocks = ceil_expert_nt // block_size
            for x in range(num_blocks):
                ref_expert_ids[expert_ids_offset + x] = expert_id
            nt_ceil_sum += ceil_expert_nt

        return (
            ref_sorted_ids.to("cuda"),
            ref_expert_ids.to("cuda"),
            ref_num_tokens_post_pad.to("cuda"),
        )

    # Compute expert_num_tokens
    expert_num_tokens = torch.randint(
        low=0,
        high=max_tokens_per_batch,
        size=(num_experts,),
        device="cpu",
        dtype=torch.int32,
    )
    if simulate_empty_batches:
        # mark half the batches to have 0 tokens
        zero_batches = torch.randperm(num_experts)[: num_experts // 2]
        expert_num_tokens[zero_batches] = 0

    # ref outputs
    ref_sorted_ids, ref_expert_ids, ref_num_tokens_post_pad = ref_outputs(
        expert_num_tokens
    )

    # outputs
    sorted_ids, expert_ids, num_tokens_post_pad = batched_moe_align_block_size(
        max_tokens_per_batch, block_size, expert_num_tokens.to("cuda")
    )

    assert ref_sorted_ids.size() == sorted_ids.size(), (
        f"{ref_sorted_ids.size()} vs {sorted_ids.size()}"
    )
    assert ref_expert_ids.size() == expert_ids.size(), (
        f"{ref_expert_ids.size()} vs {expert_ids.size()}"
    )
    assert ref_num_tokens_post_pad.size() == num_tokens_post_pad.size(), (
        f"{ref_num_tokens_post_pad.size()} vs {num_tokens_post_pad.size()}"
    )
    torch.testing.assert_close(ref_sorted_ids, sorted_ids, atol=0, rtol=0)
    torch.testing.assert_close(ref_expert_ids, expert_ids, atol=0, rtol=0)
    torch.testing.assert_close(
        ref_num_tokens_post_pad, num_tokens_post_pad, atol=0, rtol=0
    )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/moe/test_block_fp8.py
tests/kernels/moe/test_block_fp8.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Tests for block-quantized (w8a8) fp8 fused-MoE kernels.

Compares the Triton fused-MoE path and the DeepGEMM path against a
native-torch golden implementation (torch_w8a8_block_fp8_moe).
"""

import pytest
import torch

from tests.kernels.moe.utils import make_test_quant_config, make_test_weights
from tests.kernels.quant_utils import (
    native_per_token_group_quant_fp8,
    native_w8a8_block_matmul,
)
from vllm.config import VllmConfig, set_current_vllm_config
from vllm.model_executor.layers.activation import SiluAndMul
from vllm.model_executor.layers.fused_moe import fused_experts
from vllm.model_executor.layers.fused_moe.deep_gemm_moe import (
    _valid_deep_gemm_shape,
    deep_gemm_moe_fp8,
)
from vllm.model_executor.layers.fused_moe.fused_moe import (
    fused_topk,
    modular_triton_fused_moe,
)
from vllm.platforms import current_platform
from vllm.utils.deep_gemm import (
    get_mk_alignment_for_contiguous_layout,
    is_deep_gemm_e8m0_used,
)
from vllm.utils.import_utils import has_deep_gemm

dg_available = has_deep_gemm()

if current_platform.get_device_capability() < (9, 0):
    pytest.skip("FP8 Triton requires CUDA 9.0 or higher", allow_module_level=True)

if current_platform.is_fp8_fnuz():
    pytest.skip(
        "Tests in this file require float8_e4m3fn and platform does not support",
        allow_module_level=True,
    )

# Default config used only to set the vllm context (suppresses warning spam).
vllm_config = VllmConfig()

# Test configurations
DTYPES = [torch.bfloat16]  # [torch.half, torch.bfloat16, torch.float32]

# Deepseek-V3's intermediate size 18432, so N is 18432*2/8=4608 at TP8
# and its hidden size is 7168.
MNK_FACTORS = [
    (1, 128, 128),
    (1, 128, 7168),
    (1, 1024, 7168),
    (1, 4608, 128),
    (1, 4608, 7168),
    (83, 128, 128),
    (83, 512, 512),
    (83, 4608, 512),
    (83, 4608, 7168),
    (128, 512, 512),
    (128, 1024, 7168),
    (128, 4608, 7168),
    (2048, 128, 128),
    (2048, 1024, 7168),
    (2048, 4608, 512),
    (2048, 4608, 7168),
    (8192, 128, 128),
    (8192, 128, 7168),
    (8192, 1024, 7168),
    (8192, 4608, 7168),
]

# Separate shape list for DeepGEMM (must satisfy _valid_deep_gemm_shape).
MNK_FACTORS_DG = [
    (128, 128, 128),
    (128, 128, 7168),
    (128, 1024, 7168),
    (128, 4608, 128),
    (128, 4608, 7168),
    (192, 512, 512),
    (192, 1024, 7168),
    (192, 4608, 7168),
    (1335, 128, 128),
    (1335, 1024, 7168),
    (1335, 4608, 512),
    (1335, 4608, 7168),
    (2048, 128, 128),
    (2048, 128, 7168),
    (2048, 1024, 7168),
    (2048, 4608, 7168),
]

BLOCK_SIZE = [[128, 128]]
E = [2, 8, 16]  # [128, 256]
TOP_KS = [1, 2, 6]
SEEDS = [0]


def torch_w8a8_block_fp8_moe(a, w1, w2, w1_s, w2_s, topk_weight, topk_ids, block_shape):
    """Fused moe with block-wise quantization using native torch."""
    B, D = a.shape
    topk = topk_ids.size(1)
    # Replicate each token once per selected expert: (B*topk, D).
    a = a.view(B, -1, D).repeat(1, topk, 1).reshape(-1, D)
    out = torch.zeros(B * topk, w2.shape[1], dtype=a.dtype, device=a.device)

    topk_weight = topk_weight.view(-1)
    topk_ids = topk_ids.view(-1)

    _, block_k = block_shape[0], block_shape[1]
    a_q, a_s = native_per_token_group_quant_fp8(a, block_k)
    a_q = a_q.to(torch.float32)
    # Process each expert's tokens: w1 matmul -> SiluAndMul -> requantize
    # -> w2 matmul, all via the native block-quantized reference matmul.
    for i in range(w1.shape[0]):
        mask = topk_ids == i
        if mask.sum():
            inter_out = native_w8a8_block_matmul(
                a_q[mask], w1[i], a_s[mask], w1_s[i], block_shape, output_dtype=a.dtype
            )
            act_out = SiluAndMul().forward_native(inter_out)
            act_out_q, act_out_s = native_per_token_group_quant_fp8(act_out, block_k)
            out[mask] = native_w8a8_block_matmul(
                act_out_q, w2[i], act_out_s, w2_s[i], block_shape, output_dtype=a.dtype
            )
    # Weighted sum over the topk experts per token.
    return (
        out.view(B, -1, w2.shape[1]) * topk_weight.view(B, -1, 1).to(out.dtype)
    ).sum(dim=1)


# Skip all tests if CUDA is not available
pytest.importorskip("torch.cuda")


@pytest.fixture(autouse=True)
def setup_cuda():
    # All tensors in these tests are created on the default device.
    torch.set_default_device("cuda")


@pytest.mark.parametrize(("M", "N", "K"), MNK_FACTORS)
@pytest.mark.parametrize("E", E)
@pytest.mark.parametrize("topk", TOP_KS)
@pytest.mark.parametrize("block_size", BLOCK_SIZE)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("seed", SEEDS)
@torch.inference_mode()
def test_w8a8_block_fp8_fused_moe(
    M, N, K, E, topk, block_size, dtype, seed, monkeypatch, workspace_init
):
    """Triton fused-MoE (both classic and modular paths) vs golden torch."""
    if topk > E:
        pytest.skip(f"Skipping test; topk={topk} > E={E}")

    torch.manual_seed(seed)
    # Small chunk size to exercise the chunked execution path.
    monkeypatch.setenv("VLLM_FUSED_MOE_CHUNK_SIZE", "2048")

    a = torch.randn((M, K), dtype=dtype) / 10
    score = torch.randn((M, E), dtype=dtype)

    w1, w2, quant_config = make_test_quant_config(
        E,
        N,
        K,
        dtype,
        quant_dtype=torch.float8_e4m3fn,
        per_act_token_quant=False,
        block_shape=block_size,
    )

    m_fused_moe = modular_triton_fused_moe(quant_config)

    topk_weights, topk_ids, _ = fused_topk(a, score.float(), topk, False)

    # Set the context to avoid lots of warning spam.
    with set_current_vllm_config(vllm_config):
        ref_out = torch_w8a8_block_fp8_moe(
            a,
            w1,
            w2,
            quant_config.w1_scale,
            quant_config.w2_scale,
            topk_weights,
            topk_ids,
            block_size,
        )
        out = fused_experts(
            a, w1, w2, topk_weights, topk_ids, quant_config=quant_config
        )
        m_out = m_fused_moe(a, w1, w2, topk_weights, topk_ids)

    # 0.039 only needed for M >= 8192
    tol = 0.035 if M < 8192 else 0.039
    torch.testing.assert_close(out, ref_out, atol=tol, rtol=tol)
    torch.testing.assert_close(m_out, ref_out, atol=tol, rtol=tol)


@pytest.mark.parametrize(("M", "N", "K"), MNK_FACTORS_DG)
@pytest.mark.parametrize("E", E)
@pytest.mark.parametrize("topk", TOP_KS)
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.skipif(not dg_available, reason="DeepGemm kernels not available.")
@pytest.mark.skipif(is_deep_gemm_e8m0_used(), reason="Not E8M0 scale MOE")
@torch.inference_mode()
def test_w8a8_block_fp8_deep_gemm_fused_moe(M, N, K, E, topk, seed, monkeypatch):
    """DeepGEMM fused-MoE vs golden torch, optionally under CUDA graphs."""
    if topk > E:
        pytest.skip(f"Skipping test: topk={topk} > E={E}")

    if not _valid_deep_gemm_shape(M, N, K):
        pytest.skip(f"Skipping test: invalid size m={M}, n={N}, k={K}")

    chunk_size = 1024

    torch.manual_seed(seed)

    monkeypatch.setenv("VLLM_FUSED_MOE_CHUNK_SIZE", str(chunk_size))

    # DeepGEMM dictates the block quantization granularity.
    block_size = get_mk_alignment_for_contiguous_layout()
    dtype = torch.bfloat16

    a = torch.randn((M, K), dtype=dtype) / 10
    score = torch.randn((M, E), dtype=dtype)

    (_, w1, w1_s, _), (_, w2, w2_s, _) = make_test_weights(
        E,
        N,
        K,
        dtype,
        torch.float8_e4m3fn,
        per_out_ch_quant=False,
        block_shape=block_size,
    )

    # Note: for now use_compile will error out if the problem size is
    # large enough to trigger chunking. I'm leaving the flag and
    # setup code in case we are able to revisit this later.
    use_compile = False

    use_cudagraph = (
        chunk_size < M and N >= 1024 and K >= 1024 and current_platform.is_cuda_alike()
    )

    topk_weights, topk_ids, _ = fused_topk(a, score.float(), topk, False)

    # Set the context to avoid lots of warning spam.
    with set_current_vllm_config(vllm_config):
        ref_out = torch_w8a8_block_fp8_moe(
            a, w1, w2, w1_s, w2_s, topk_weights, topk_ids, block_size
        )

        if use_compile:
            deep_gemm_moe_fp8_fn = torch.compile(
                deep_gemm_moe_fp8, backend="inductor", fullgraph=True
            )
            torch._dynamo.mark_dynamic(a, 0)
            torch._dynamo.mark_dynamic(topk_weights, 0)
            torch._dynamo.mark_dynamic(topk_ids, 0)
        else:
            deep_gemm_moe_fp8_fn = deep_gemm_moe_fp8

        out = deep_gemm_moe_fp8_fn(a, w1, w2, w1_s, w2_s, topk_weights, topk_ids)

        if use_cudagraph:
            # Capture once, then replay to verify graph-safe execution.
            out.fill_(0)
            stream = torch.cuda.Stream()
            graph = torch.cuda.CUDAGraph()
            with torch.cuda.graph(graph, stream=stream):
                out = deep_gemm_moe_fp8_fn(
                    a, w1, w2, w1_s, w2_s, topk_weights, topk_ids
                )
            torch.cuda.synchronize()
            graph.replay()
            torch.cuda.synchronize()

        torch.testing.assert_close(out, ref_out, atol=0.035, rtol=0.035)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/moe/__init__.py
tests/kernels/moe/__init__.py
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/moe/test_ocp_mx_moe.py
tests/kernels/moe/test_ocp_mx_moe.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import importlib.metadata from dataclasses import dataclass from importlib.util import find_spec import pytest import torch from packaging import version from vllm.platforms import current_platform from vllm.utils.flashinfer import has_flashinfer QUARK_MXFP4_AVAILABLE = find_spec("quark") is not None and version.parse( importlib.metadata.version("amd-quark") ) >= version.parse("0.8.99") TRTLLM_GEN_MXFP4_AVAILABLE = ( current_platform.is_cuda() and current_platform.is_device_capability_family(100) ) HOPPER_MXFP4_BF16_AVAILABLE = ( current_platform.is_cuda() and current_platform.is_device_capability(90) and has_flashinfer() ) if TRTLLM_GEN_MXFP4_AVAILABLE: from flashinfer import ( fp4_quantize, mxfp8_quantize, next_positive_power_of_2, reorder_rows_for_gated_act_gemm, shuffle_matrix_a, shuffle_matrix_sf_a, trtllm_fp4_block_scale_moe, ) from flashinfer.fp4_quantization import nvfp4_block_scale_interleave from flashinfer.fused_moe.core import get_w2_permute_indices_with_cache @dataclass class ModelCase: model_id: str tp: int @pytest.fixture(scope="function", autouse=True) def enable_pickle(monkeypatch): """`LLM.apply_model` requires pickling a function.""" monkeypatch.setenv("VLLM_ALLOW_INSECURE_SERIALIZATION", "1") @pytest.mark.parametrize( "model_case", [ ModelCase("fxmarty/qwen_1.5-moe-a2.7b-mxfp4", tp=2), ModelCase("fxmarty/deepseek_r1_3_layers_mxfp4", tp=8), ModelCase("fxmarty/Llama-4-Scout-17B-16E-Instruct-2-layers-mxfp4", tp=1), ModelCase("fxmarty/Llama-3.1-70B-Instruct-2-layers-mxfp6", tp=1), ModelCase("fxmarty/Llama-3.1-70B-Instruct-2-layers-mxfp6", tp=4), ], ) @pytest.mark.skipif(not QUARK_MXFP4_AVAILABLE, reason="amd-quark>=0.9 is not available") def test_mxfp4_loading_and_execution_moe(vllm_runner, model_case: ModelCase): if torch.cuda.device_count() < model_case.tp: pytest.skip( f"This test requires >={model_case.tp} gpus, got only " 
f"{torch.cuda.device_count()}" ) # `cudagraph_capture_sizes=[16]` to reduce load time. with vllm_runner( model_case.model_id, tensor_parallel_size=model_case.tp, load_format="dummy", cudagraph_capture_sizes=[16], ) as llm: # Disabled as check_model is broken: https://github.com/vllm-project/vllm/pull/18465#issuecomment-3329880562 # def check_model(model): # from vllm.model_executor.layers.quantization.quark.quark import ( # noqa: E501 # QuarkLinearMethod) # from vllm.model_executor.layers.quantization.quark.schemes.quark_ocp_mx import QuarkOCP_MX # noqa: E501 # from vllm.model_executor.layers.quantization.quark.quark_moe import ( # noqa: E501 # QuarkOCP_MX_MoEMethod) # layer = model.model.layers[0] # qkv_proj = layer.self_attn.qkv_proj # assert isinstance(qkv_proj.quant_method, QuarkLinearMethod) # assert isinstance(qkv_proj.scheme, QuarkOCP_MX) # assert isinstance(layer.mlp.experts.quant_method, # QuarkOCP_MX_MoEMethod) # if model_case.model_id == "fxmarty/qwen_1.5-moe-a2.7b-mxfp4": # llm.apply_model(check_model) output = llm.generate_greedy("Today I am in the French Alps and", max_tokens=20) assert output def swiglu(x, alpha: float = 1.702, beta: float = 1.0, limit: float | None = None): # Note we add an extra bias of 1 to the linear layer x_glu, x_linear = torch.chunk(x, 2, dim=-1) if limit is not None: x_glu = x_glu.clamp(max=limit) x_linear = x_linear.clamp(min=-limit, max=limit) out_glu = x_glu * torch.sigmoid(alpha * x_glu) return out_glu * (x_linear + beta) fp4_lookup_table = [0, 0.5, 1, 1.5, 2, 3, 4, 6, -0, -0.5, -1, -1.5, -2, -3, -4, -6] def mxfp4_dequantize(x, scale): assert x.dtype == torch.uint8 x = x.view(torch.uint8).to(torch.int32) x_unpacked = torch.zeros( *x.shape[:-1], x.shape[-1] * 2, dtype=torch.int32, device=x.device ) x_unpacked[..., 0::2].copy_(x & 0xF) x_unpacked[..., 1::2].copy_((x >> 4) & 0xF) x_float = torch.zeros(x_unpacked.shape, dtype=torch.float32, device=x.device) for i, val in enumerate(fp4_lookup_table): x_float[x_unpacked == i] = 
val scale = scale.view(torch.uint8).to(torch.int32) scale = (scale << 23).view(torch.float32) scale = scale.reshape(*x.shape[:-1], -1) scale = torch.stack([scale] * 32, dim=-1).reshape(*x_float.shape) return x_float * scale def mxfp8_dequantize(x, scale): assert x.dtype == torch.float8_e4m3fn x_float = x.to(torch.float32) scale = scale.view(torch.uint8).to(torch.int32) scale = (scale << 23).view(torch.float32) scale = scale.reshape(*x.shape[:-1], -1) scale = torch.stack([scale] * 32, dim=-1).reshape(*x_float.shape) return x_float * scale def reference_moe( roouting_logits, topk, num_experts, hidden_states, w13, bias13, w2, bias2, alpha, beta, limit, act_type, ): # renormalize routing experts = torch.topk(roouting_logits, k=topk, dim=-1, sorted=True) expert_weights = torch.nn.functional.softmax(experts.values, dim=1) expert_indices = experts.indices t = hidden_states.clone() # MLP #1 mlp1_weight = w13[expert_indices, ...] mlp1_bias = bias13[expert_indices, ...] t = torch.einsum("beck,bk->bec", mlp1_weight, t) + mlp1_bias t = swiglu(t, alpha=alpha, beta=beta, limit=limit) if act_type == "mxfp8": t_quantized, t_scale = mxfp8_quantize( t.to(torch.bfloat16), is_sf_swizzled_layout=False ) t = mxfp8_dequantize(t_quantized, t_scale) # MLP #2 mlp2_weight = w2[expert_indices, ...] mlp2_bias = bias2[expert_indices, ...] t = torch.einsum("beck,bek->bec", mlp2_weight, t) + mlp2_bias # Weighted sum of experts t = torch.einsum("bec,be->bc", t, expert_weights) assert t.shape == hidden_states.shape return t.to(torch.bfloat16) def get_tile_tokens_dim(x: torch.Tensor, top_k: int, num_experts: int): # Number of tokens in the input tensor. num_tokens = x.shape[0] # Factor to account for the imbalance of the experts. # factor equals to the # max_real_num_tokens_per_expert / perfect_num_tokens_per_expert # - 1.0 means perfect expert distribution. # - > 1.0 means some experts have more # tokens than the perfect distribution. # - < 1.0 does not make sense. 
imbalance_factor = 1.3 # Calculate the number of tokens per expert # assuming perfect distribution. num_tokens_per_expert = (num_tokens * top_k) // num_experts # Apply the imbalance factor. num_tokens_per_expert = int(num_tokens_per_expert * imbalance_factor) # And pad the number to the next power of 2. tile_tokens_dim = next_positive_power_of_2(num_tokens_per_expert) # Cap to 8-64 tokens per CTA tile # as it's the range supported by the kernel. tile_tokens_dim = min(max(tile_tokens_dim, 8), 64) return tile_tokens_dim def tg_mxfp4_moe( router_logits, topk, num_experts, intermediate_size, hidden_size, hidden_states, hidden_states_scale, w13_weight, w13_weight_scale, w13_bias, w2_weight, w2_weight_scale, w2_bias, act_type, alpha, beta, limit, transpose_optimized: bool = False, ) -> torch.Tensor: sf_block_size = 32 assert ( w13_weight.dim() == 3 and w13_weight.shape[0] == num_experts and w13_weight.shape[1] == intermediate_size * 2 and w13_weight.shape[2] == hidden_size // 2 ) assert ( w13_weight_scale.dim() == 3 and w13_weight_scale.shape[0] == num_experts and w13_weight_scale.shape[1] == intermediate_size * 2 and w13_weight_scale.shape[2] == hidden_size // sf_block_size ) assert ( w2_weight.dim() == 3 and w2_weight.shape[0] == num_experts and w2_weight.shape[1] == hidden_size and w2_weight.shape[2] == intermediate_size // 2 ) assert ( w2_weight_scale.dim() == 3 and w2_weight_scale.shape[1] == hidden_size and w2_weight_scale.shape[2] == intermediate_size // sf_block_size ) assert ( w13_bias.dim() == 2 and w13_bias.shape[0] == num_experts and w13_bias.shape[1] == intermediate_size * 2 ) assert ( w2_bias.dim() == 2 and w2_bias.shape[0] == num_experts and w2_bias.shape[1] == hidden_size ) # Swap w1 and w3 as the definition of # swiglu is different in the trtllm-gen w13_weight_scale_ = w13_weight_scale.clone() w13_weight_ = w13_weight.clone() w13_bias_ = w13_bias.clone() w13_weight[:, :intermediate_size, :].copy_(w13_weight_[:, intermediate_size:, :]) w13_weight[:, 
intermediate_size:, :].copy_(w13_weight_[:, :intermediate_size, :]) w13_weight_scale[:, :intermediate_size, :].copy_( w13_weight_scale_[:, intermediate_size:, :] ) w13_weight_scale[:, intermediate_size:, :].copy_( w13_weight_scale_[:, :intermediate_size, :] ) w13_bias[:, :intermediate_size].copy_(w13_bias_[:, intermediate_size:]) w13_bias[:, intermediate_size:].copy_(w13_bias_[:, :intermediate_size]) # Interleave the weights and scaling factors for activation w13_weight_interleaved = [] w13_weight_scale_interleaved = [] w13_bias_interleaved = [] for i in range(num_experts): w13_weight_interleaved.append( reorder_rows_for_gated_act_gemm(w13_weight[i].clone()) ) w13_weight_scale_interleaved.append( reorder_rows_for_gated_act_gemm(w13_weight_scale[i].clone()) ) w13_bias_interleaved.append( reorder_rows_for_gated_act_gemm(w13_bias[i].clone().reshape(-1, 1)) ) w13_weight = torch.stack(w13_weight_interleaved).reshape( num_experts, 2 * intermediate_size, hidden_size // 2 ) w13_weight_scale = torch.stack(w13_weight_scale_interleaved).reshape( num_experts, 2 * intermediate_size, hidden_size // 32 ) w13_bias = torch.stack(w13_bias_interleaved).reshape( num_experts, 2 * intermediate_size ) # Shuffle weights and scaling factors for transposed mma output gemm1_weights_shuffled = [] gemm1_scales_shuffled = [] gemm2_weights_shuffled = [] gemm2_scales_shuffled = [] gemm1_bias_shuffled = [] gemm2_bias_shuffled = [] epilogue_tile_m = 128 # FIXME: this depends on the kernel internals _cache_permute_indices: dict[torch.Size, torch.Tensor] = {} if transpose_optimized: for i in range(num_experts): # w13 weight shuffling permute_indices = get_w2_permute_indices_with_cache( _cache_permute_indices, w13_weight[i].view(torch.uint8), epilogue_tile_m, ) gemm1_weights_shuffled.append( w13_weight[i] .view(torch.uint8)[permute_indices.to(w13_weight.device)] .contiguous() ) # w13 scale shuffling permute_sf_indices = get_w2_permute_indices_with_cache( _cache_permute_indices, 
w13_weight_scale[i].view(torch.uint8), epilogue_tile_m, num_elts_per_sf=16, ) gemm1_scales_shuffled.append( nvfp4_block_scale_interleave( w13_weight_scale[i] .view(torch.uint8)[permute_sf_indices.to(w13_weight_scale.device)] .contiguous() ) ) # w13 bias shuffling permute_bias_indices = get_w2_permute_indices_with_cache( _cache_permute_indices, w13_bias[i].clone().reshape(-1, 1), epilogue_tile_m, ) gemm1_bias_shuffled.append( w13_bias[i] .clone() .reshape(-1, 1)[permute_bias_indices.to(w13_bias.device)] .contiguous() ) # w2 weight shuffling permute_indices = get_w2_permute_indices_with_cache( _cache_permute_indices, w2_weight[i].view(torch.uint8), epilogue_tile_m, ) gemm2_weights_shuffled.append( w2_weight[i] .view(torch.uint8)[permute_indices.to(w2_weight.device)] .contiguous() ) # w2 scale shuffling permute_sf_indices = get_w2_permute_indices_with_cache( _cache_permute_indices, w2_weight_scale[i].view(torch.uint8), epilogue_tile_m, num_elts_per_sf=16, ) gemm2_scales_shuffled.append( nvfp4_block_scale_interleave( w2_weight_scale[i] .view(torch.uint8)[permute_sf_indices.to(w2_weight_scale.device)] .contiguous() ) ) # w2 bias shuffling permute_indices = get_w2_permute_indices_with_cache( _cache_permute_indices, w2_bias[i].clone().reshape(-1, 1), epilogue_tile_m, ) gemm2_bias_shuffled.append( w2_bias[i] .clone() .reshape(-1, 1)[permute_indices.to(w2_bias.device)] .contiguous() ) else: for i in range(num_experts): gemm1_weights_shuffled.append( shuffle_matrix_a(w13_weight[i].view(torch.uint8), epilogue_tile_m) ) gemm1_scales_shuffled.append( shuffle_matrix_sf_a( w13_weight_scale[i].view(torch.uint8), epilogue_tile_m ) ) gemm2_weights_shuffled.append( shuffle_matrix_a(w2_weight[i].view(torch.uint8), epilogue_tile_m) ) gemm2_scales_shuffled.append( shuffle_matrix_sf_a( w2_weight_scale[i].view(torch.uint8), epilogue_tile_m ) ) gemm1_bias_shuffled.append( shuffle_matrix_a(w13_bias[i].reshape(-1, 1), epilogue_tile_m) ) gemm2_bias_shuffled.append( 
def check_accuracy(a, b, atol, rtol, percent):
    """Allow a mismatch percentage of 1 - percent.

    Rejects NaN/Inf in either tensor, then raises unless at most
    ``1 - percent`` of the elements violate ``|a - b| <= atol + rtol * |b|``.
    """
    # Fail fast on non-finite values in either tensor.
    if torch.isnan(a).any():
        raise Exception("NaN in reference output")
    if torch.isnan(b).any():
        raise Exception("NaN in actual output")
    if torch.isinf(a).any():
        raise Exception("Inf in reference output")
    if torch.isinf(b).any():
        raise Exception("Inf in actual output")
    assert a.shape == b.shape, f"Shape mismatch: {a.shape} vs {b.shape}"

    # Element is a mismatch when its absolute error exceeds the combined
    # absolute + relative tolerance (relative to the actual output b).
    diff = (a - b).abs()
    tolerance = atol + rtol * b.abs()
    mismatch_percent = (diff > tolerance).sum() / a.numel()
    if mismatch_percent > 1 - percent:
        raise Exception(
            f"Mismatch percentage is {mismatch_percent:.4f} for rtol {rtol} "
            f"(threshold: {1 - percent:.4f})"
        )
"mxfp8": hidden_states, hidden_states_scale = mxfp8_quantize( hidden_states, is_sf_swizzled_layout=False ) hidden_states_scale = hidden_states_scale.view(torch.float8_e4m3fn).reshape(-1) else: hidden_states_scale = None # reference result ref_result = torch.empty_like(hidden_states, dtype=torch.bfloat16) w13_ref = mxfp4_dequantize(w13.clone(), w13_scale.clone()) w2_ref = mxfp4_dequantize(w2.clone(), w2_scale.clone()) bias13_ref = bias13 bias2_ref = bias2 if act_type == "mxfp8": hidden_states_ref = mxfp8_dequantize(hidden_states, hidden_states_scale).to( torch.float32 ) else: hidden_states_ref = hidden_states.to(torch.float32) # Process tokens in chunks of 32 to reduce memory usage chunk_size = 32 num_chunks = (num_tokens + chunk_size - 1) // chunk_size for i in range(num_chunks): start_idx = i * chunk_size end_idx = min(start_idx + chunk_size, num_tokens) chunk_result = reference_moe( router_logits[start_idx:end_idx].to(torch.float32), topk, num_experts, hidden_states_ref[start_idx:end_idx], w13_ref, bias13_ref, w2_ref, bias2_ref, alpha, beta, limit, act_type, ) ref_result[start_idx:end_idx].copy_(chunk_result) # trtllm-gen result if alpha is not None: alpha = torch.full((num_experts,), alpha, device=hidden_states.device) if limit is not None: limit = torch.full((num_experts,), limit, device=hidden_states.device) if beta is not None: beta = torch.full((num_experts,), beta, device=hidden_states.device) tg_result = tg_mxfp4_moe( router_logits, topk, num_experts, intermediate_size, hidden_size, hidden_states, hidden_states_scale, w13, w13_scale, bias13, w2, w2_scale, bias2, act_type, alpha=alpha, beta=beta, limit=limit, transpose_optimized=transpose_optimized, ) # relatively loose check since the mxfp4 quantization is less accurate check_accuracy(ref_result, tg_result, atol=0, rtol=0.3, percent=0.8) def _interleave_scales_lastdim_by4(scales: torch.Tensor) -> torch.Tensor: """Interleave scales on the last dimension by groups of 4, matching the transformation in 
mxfp4.py's BF16 (Hopper) path.""" s = scales.to(torch.uint8) s_shape = s.shape assert s_shape[-1] % 4 == 0 s = s.reshape(*s_shape[:-1], s_shape[-1] // 4, 4) # Move the 4-group dimension before the row dimension permuted = s.permute(0, 2, 1, 3) # Merge the row dim with the 4-group dim return permuted.reshape(s_shape[0], s_shape[-1] // 4, s_shape[1] * 4) @pytest.mark.parametrize("topk", [1, 4]) @pytest.mark.parametrize("num_experts", [32]) @pytest.mark.parametrize("num_tokens", [1, 128]) @pytest.mark.parametrize("intermediate_size,hidden_size", [(3072, 3072)]) @pytest.mark.parametrize("alpha,beta,limit", [(1.0, 1.0, None), (1.702, 1.0, 7.0)]) @pytest.mark.skipif( not HOPPER_MXFP4_BF16_AVAILABLE, reason="nvidia gpu sm90 and flashinfer are required for this test", ) def test_flashinfer_cutlass_mxfp4_fused_moe( topk: int, num_experts: int, num_tokens: int, intermediate_size: int, hidden_size: int, alpha: float, beta: float, limit: float | None, ): torch.manual_seed(42) device = "cuda:0" # Inputs hidden_states = torch.randn( num_tokens, hidden_size, device=device, dtype=torch.bfloat16 ) # Random MXFP4 weights and scales (uint8), contiguous [w1; w3] w13_q = torch.randint( 0, 256, (num_experts, 2 * intermediate_size, hidden_size // 2), device=device, dtype=torch.uint8, ) w13_scale = torch.randint( 118, 123, (num_experts, 2 * intermediate_size, hidden_size // 32), device=device, dtype=torch.uint8, ) w2_q = torch.randint( 0, 256, (num_experts, hidden_size, intermediate_size // 2), device=device, dtype=torch.uint8, ) w2_scale = torch.randint( 118, 123, (num_experts, hidden_size, intermediate_size // 32), device=device, dtype=torch.uint8, ) # Bias contiguous [b1; b3] bias13 = ( torch.randn( num_experts, 2 * intermediate_size, device=device, dtype=torch.bfloat16 ) * 10 ) bias2 = ( torch.randn(num_experts, hidden_size, device=device, dtype=torch.bfloat16) * 10 ) router_logits = torch.rand( num_tokens, num_experts, dtype=torch.float32, device=device ) w13_ref = 
mxfp4_dequantize(w13_q.clone(), w13_scale.clone()).reshape( num_experts, 2 * intermediate_size, hidden_size ) w2_ref = mxfp4_dequantize(w2_q.clone(), w2_scale.clone()).reshape( num_experts, hidden_size, intermediate_size ) ref = reference_moe( router_logits.to(torch.float32), topk, num_experts, hidden_states.to(torch.float32), w13_ref, bias13.to(torch.float32), w2_ref, bias2.to(torch.float32), alpha, beta, limit, "bf16", ) from vllm.utils.flashinfer import flashinfer_cutlass_fused_moe # Swap halves to arrange as [w3; w1] (kernel expectation) w1_w, w3_w = torch.chunk(w13_q, 2, dim=1) w13_q_swapped = torch.cat([w3_w, w1_w], dim=1) b1, b3 = torch.chunk(bias13.to(torch.float32), 2, dim=-1) w13_b = torch.cat([b3, b1], dim=-1).to(torch.bfloat16) w1_s, w3_s = torch.chunk(w13_scale, 2, dim=1) w13_s = torch.cat([w3_s, w1_s], dim=1) w13_s_inter = _interleave_scales_lastdim_by4(w13_s) w2_s_inter = _interleave_scales_lastdim_by4(w2_scale) routing_weights = torch.nn.functional.softmax( router_logits, dim=1, dtype=torch.float32 ) token_final_scales, token_selected_experts = torch.topk( routing_weights, topk, dim=-1 ) token_final_scales = token_final_scales / token_final_scales.sum( dim=-1, keepdim=True ) token_selected_experts = token_selected_experts.to(torch.int).contiguous() out = torch.empty_like(hidden_states, dtype=torch.bfloat16) if alpha is not None: alpha = torch.full((num_experts,), alpha, device=hidden_states.device) if beta is not None: beta = torch.full((num_experts,), beta, device=hidden_states.device) if limit is not None: limit = torch.full((num_experts,), limit, device=hidden_states.device) _ = flashinfer_cutlass_fused_moe( input=hidden_states, token_selected_experts=token_selected_experts, token_final_scales=token_final_scales, fc1_expert_weights=w13_q_swapped, fc2_expert_weights=w2_q, output_dtype=torch.bfloat16, output=out, quant_scales=[w13_s_inter.to(torch.uint8), w2_s_inter.to(torch.uint8)], fc1_expert_biases=w13_b, 
fc2_expert_biases=bias2.to(torch.bfloat16), swiglu_alpha=alpha, swiglu_beta=beta, swiglu_limit=limit, tp_size=1, tp_rank=0, ep_size=1, ep_rank=0, use_w4_group_scaling=True, ) # Allow some mismatch due to MXFP4 quantization check_accuracy(ref, out, atol=0, rtol=0.3, percent=0.8) @pytest.mark.parametrize("topk", [1, 4]) @pytest.mark.parametrize("num_experts", [32]) @pytest.mark.parametrize("num_tokens", [1, 128]) @pytest.mark.parametrize("intermediate_size,hidden_size", [(3072, 3072)]) @pytest.mark.parametrize("alpha,beta,limit", [(1.0, 1.0, None), (1.702, 1.0, 7.0)]) @pytest.mark.skipif( not ( current_platform.is_cuda() and current_platform.is_device_capability_family(100) and has_flashinfer() ), reason="NVIDIA GPU sm100 and flashinfer are required for this test", ) def test_flashinfer_cutlass_mxfp4_mxfp8_fused_moe( topk: int, num_experts: int, num_tokens: int, intermediate_size: int, hidden_size: int, alpha: float | None, beta: float | None, limit: float | None, ): torch.manual_seed(42) device = "cuda:0" # Inputs hidden_states = torch.randn( num_tokens, hidden_size, device=device, dtype=torch.bfloat16 ) # Float weights in w13 format [w1; w3] w13 = ( torch.randn( num_experts, 2 * intermediate_size, hidden_size, device=device, dtype=torch.bfloat16, ) / 10 ) w2 = ( torch.randn( num_experts, hidden_size, intermediate_size, device=device, dtype=torch.bfloat16, ) / 10 ) # Bias contiguous [b1; b3] bias13 = ( torch.randn( num_experts, 2 * intermediate_size, device=device, dtype=torch.bfloat16 ) * 10 ) bias2 = ( torch.randn(num_experts, hidden_size, device=device, dtype=torch.bfloat16) * 10 ) router_logits = torch.rand( num_tokens, num_experts, dtype=torch.float32, device=device ) # Quantize weights to MXFP4 per expert (SM100 path) from flashinfer import mxfp4_quantize def quant_mxfp4_batches(a: torch.Tensor, e: int): qs, sfs = [], [] for i in range(e): q, sf = mxfp4_quantize(a[i].cuda()) qs.append(q) sfs.append(sf) return torch.stack(qs), torch.stack(sfs) def 
dequant_mxfp4_batches(mat_fp4: torch.Tensor, scale_tensor: torch.Tensor): num_batches = mat_fp4.size(0) scale_tensor = scale_tensor.view(num_batches, -1) from flashinfer import mxfp4_dequantize return torch.stack( [ mxfp4_dequantize(mat_fp4[b, :, :], scale_tensor[b, :]) for b in range(num_batches) ] ) w13_q, w13_scale = quant_mxfp4_batches(w13, num_experts) w2_q, w2_scale = quant_mxfp4_batches(w2, num_experts) # Reference result using dequantized tensors and reference_moe w13_ref = ( dequant_mxfp4_batches( w13_q.view(torch.uint8), w13_scale.view(torch.uint8).reshape(-1) ) .to(torch.float32) .reshape(num_experts, 2 * intermediate_size, hidden_size) .to(device) ) w2_ref = ( dequant_mxfp4_batches( w2_q.view(torch.uint8), w2_scale.view(torch.uint8).reshape(-1) ) .to(torch.float32) .reshape(num_experts, hidden_size, intermediate_size) .to(device) ) # Quantize activations for SM100 path and dequantize for reference hidden_states_q, hidden_states_sf = mxfp8_quantize(hidden_states, True, 32) # Reference uses BF16 input but quantizes intermediate activation to MXFP8 ref = reference_moe( router_logits.to(torch.float32), topk, num_experts, hidden_states.to(torch.float32), w13_ref, bias13.to(torch.float32), w2_ref, bias2.to(torch.float32), alpha, beta, limit, "mxfp8", ) # Prepare inputs for FlashInfer CUTLASS fused MoE from vllm.utils.flashinfer import flashinfer_cutlass_fused_moe # Swap halves to arrange as [w3; w1] (kernel expectation) w1_w, w3_w = torch.chunk(w13_q, 2, dim=1) w13_q_swapped = torch.cat([w3_w, w1_w], dim=1) # Swap scales halves to match swapped weights s1, s3 = torch.chunk(w13_scale, 2, dim=1) w13_scale_swapped = torch.cat([s3, s1], dim=1) b1, b3 = torch.chunk(bias13.to(torch.float32), 2, dim=-1) w13_b = torch.cat([b3, b1], dim=-1).to(torch.bfloat16) # Build routing for kernel routing_weights = torch.nn.functional.softmax( router_logits, dim=1, dtype=torch.float32 ) token_final_scales, token_selected_experts = torch.topk( routing_weights, topk, dim=-1 ) 
token_final_scales = token_final_scales / token_final_scales.sum( dim=-1, keepdim=True ) token_selected_experts = token_selected_experts.to(torch.int).contiguous() out = torch.empty_like(hidden_states, dtype=torch.bfloat16) if alpha is not None: alpha_t = torch.full((num_experts,), alpha, device=hidden_states.device) else: alpha_t = None if beta is not None: beta_t = torch.full((num_experts,), beta, device=hidden_states.device) else: beta_t = None if limit is not None: limit_t = torch.full((num_experts,), limit, device=hidden_states.device) else: limit_t = None # Quant scales for SM100 MXFP8+MXFP4 path fake_input_scale = torch.ones(num_experts, device=device) quant_scales = [ w13_scale_swapped.view(torch.int32), fake_input_scale, w2_scale.view(torch.int32), fake_input_scale, ] _ = flashinfer_cutlass_fused_moe( input=hidden_states_q, token_selected_experts=token_selected_experts, token_final_scales=token_final_scales,
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
true
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/moe/test_silu_mul_per_token_group_quant_fp8_colmajor.py
tests/kernels/moe/test_silu_mul_per_token_group_quant_fp8_colmajor.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import pytest import torch from vllm.model_executor.layers.quantization.utils.fp8_utils import ( _per_token_group_quant_fp8_colmajor, silu_mul_per_token_group_quant_fp8_colmajor, ) from vllm.platforms import current_platform from vllm.triton_utils import triton from vllm.utils.deep_gemm import is_deep_gemm_e8m0_used FLOAT8_DTYPE = torch.float8_e4m3fn GROUP_SIZE = 128 def reference_quant(x: torch.Tensor, use_ue8m0: bool): """ Reference triton quant kernel from, vllm.model_executor.layers.quantization.utils.fp8_utils """ x_q = torch.empty_like(x, device=x.device, dtype=FLOAT8_DTYPE) # Allocate the scale tensor in column-major format. shape = (x.shape[-1] // GROUP_SIZE,) + x.shape[:-1] x_s = torch.empty(shape, device=x.device, dtype=torch.float32).permute(-1, -2) M = x.numel() // GROUP_SIZE N = GROUP_SIZE BLOCK = triton.next_power_of_2(N) # heuristics for number of warps num_warps = min(max(BLOCK // 256, 1), 8) num_stages = 1 finfo = torch.finfo(FLOAT8_DTYPE) fp8_min = finfo.min fp8_max = finfo.max _per_token_group_quant_fp8_colmajor[(M,)]( x, x_q, x_s, GROUP_SIZE, x.shape[1], x.stride(0), x_s.stride(1), eps=1e-10, fp8_min=fp8_min, fp8_max=fp8_max, use_ue8m0=use_ue8m0, BLOCK=BLOCK, num_warps=num_warps, num_stages=num_stages, ) return x_q, x_s def reference(x: torch.Tensor, use_ue8m0: bool) -> tuple[torch.Tensor, torch.Tensor]: T, N = x.size() ref_act_out = torch.empty((T, N // 2), dtype=torch.bfloat16, device="cuda") torch.ops._C.silu_and_mul(ref_act_out, x) return reference_quant(ref_act_out, use_ue8m0) @pytest.mark.parametrize("T", [128, 256, 512]) @pytest.mark.parametrize("N", [128 * 2, 256 * 2, 768 * 2, 2048 * 2, 7168 * 2]) @pytest.mark.skipif( current_platform.is_rocm(), reason="ROCm does not support DeepGemm.", ) def test_silu_mul_fp8_quant_deep_gemm(T: int, N: int): current_platform.seed_everything(42) input = torch.rand((T, N), dtype=torch.bfloat16, 
device="cuda") use_ue8m0 = is_deep_gemm_e8m0_used() # Test output, output_scales = silu_mul_per_token_group_quant_fp8_colmajor( input, use_ue8m0=use_ue8m0 ) # Reference ref_output, ref_output_scales = reference(input, use_ue8m0) torch.testing.assert_close(output.to(torch.float32), ref_output.to(torch.float32)) torch.testing.assert_close(output_scales, ref_output_scales)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/moe/test_cpu_fused_moe.py
tests/kernels/moe/test_cpu_fused_moe.py
def ref_fused_moe(
    input: torch.Tensor,
    w13: torch.Tensor,
    w2: torch.Tensor,
    w13_bias: torch.Tensor | None,
    w2_bias: torch.Tensor | None,
    topk_weights: torch.Tensor,
    topk_ids: torch.Tensor,
    activation: str,
) -> torch.Tensor:
    """Unfused reference MoE forward used to validate ``cpu_fused_moe``.

    Groups tokens by their selected expert, runs each expert's gate/up
    projection, activation, and down projection in float32, then scatters the
    results back to token order and combines them with ``topk_weights``.
    Returns a ``(num_tokens, hidden)`` tensor in ``input``'s dtype.
    """
    len_experts = w13.size(0)
    # One-hot count of expert selections per token -> tokens per expert.
    cnts = topk_ids.new_zeros((topk_ids.shape[0], len_experts))
    cnts.scatter_(1, topk_ids.to(torch.int64), 1)
    tokens_per_expert = cnts.sum(dim=0)
    # Sorting the flattened expert ids groups (token, k) slots by expert;
    # idxs // topk maps each slot back to its source token row.
    idxs = topk_ids.view(-1).argsort()
    sorted_tokens = input[idxs // topk_ids.shape[1]]
    tokens_per_expert = tokens_per_expert.cpu().numpy()
    outputs = []
    start_idx = 0
    for i, num_tokens in enumerate(tokens_per_expert):
        # Each expert owns a contiguous slice of the sorted token slots.
        end_idx = start_idx + num_tokens
        if num_tokens == 0:
            continue
        tokens_for_this_expert = sorted_tokens[start_idx:end_idx].float()
        curr_w13 = w13[i].float()
        curr_w2 = w2[i].float()
        curr_w13_bias = None
        if w13_bias is not None:
            curr_w13_bias = w13_bias[i].float()
        curr_w2_bias = None
        if w2_bias is not None:
            curr_w2_bias = w2_bias[i].float()
        gate_up = torch.nn.functional.linear(
            tokens_for_this_expert, curr_w13, curr_w13_bias
        )
        # Note: to simulate the kernel implementation
        # (round-trip through the input dtype after the activation).
        gate_up = (
            _CPU_MOE_ACT[activation]
            .forward_native(gate_up)
            .to(dtype=input.dtype)
            .float()
        )
        expert_out = torch.nn.functional.linear(gate_up, curr_w2, curr_w2_bias)
        outputs.append(expert_out)
        start_idx = end_idx
    outs = torch.cat(outputs, dim=0) if len(outputs) else sorted_tokens.new_empty(0)
    # Undo the expert-sorted ordering: slot idxs[j] receives row j.
    new_x = torch.empty_like(outs)
    new_x[idxs] = outs
    # Weight each of the top-k expert outputs and reduce over k.
    final_out = (
        new_x.view(*topk_ids.shape, -1)
        .mul_(topk_weights.unsqueeze(dim=-1))
        .sum(dim=1)
        .type(input.dtype)
    )
    return final_out
packed_w2, w13_bias, w2_bias, topk_weight, topk_ids, act, isa, ) atol, rtol = get_default_atol(output), get_default_rtol(output) ( torch.testing.assert_close(output, ref_output, atol=atol, rtol=rtol), f"{torch.max(torch.abs(output - ref_output))}", )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/moe/test_deepep_moe.py
tests/kernels/moe/test_deepep_moe.py
def make_weights(
    e, n, k, dtype
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
    """
    Return weights w1, w2, w1_scale, w2_scale

    For float16/bfloat16, returns random weights and ``None`` scales.
    For float8_e4m3fn, returns per-output-channel FP8-quantized weights
    plus their float32 scales (shape ``(e, rows, 1)``).
    """
    if dtype in [torch.float16, torch.bfloat16]:
        w1 = torch.randn((e, 2 * n, k), device="cuda", dtype=dtype) / 10
        w2 = torch.randn((e, k, n), device="cuda", dtype=dtype) / 10
        return w1, w2, None, None

    # per-out-channel weight quantization
    assert dtype == torch.float8_e4m3fn
    # NOTE(review): w1/w2 are allocated with torch.empty, so the values
    # being quantized are arbitrary memory rather than randn like the
    # unquantized branch — presumably acceptable for a dispatch/combine
    # test, but confirm this is intentional.
    w1 = torch.empty((e, 2 * n, k), device="cuda", dtype=torch.float16)
    w2 = torch.empty((e, k, n), device="cuda", dtype=torch.float16)

    n_b_scales = 2 * n
    k_b_scales = k

    w1_q = torch.empty_like(w1, dtype=dtype)
    w2_q = torch.empty_like(w2, dtype=dtype)

    w1_scale = torch.empty((e, n_b_scales, 1), device="cuda", dtype=torch.float32)
    w2_scale = torch.empty((e, k_b_scales, 1), device="cuda", dtype=torch.float32)

    # Quantize each expert independently with dynamic per-token (here:
    # per-output-row) scaling.
    for expert in range(e):
        w1_q[expert], w1_scale[expert] = ops.scaled_fp8_quant(
            w1[expert], use_per_token_if_dynamic=True
        )
        w2_q[expert], w2_scale[expert] = ops.scaled_fp8_quant(
            w2[expert], use_per_token_if_dynamic=True
        )
    return w1_q, w2_q, w1_scale, w2_scale
assert config.dtype in [torch.bfloat16, torch.float8_e4m3fn] token_dtype = ( torch.bfloat16 if config.dtype == torch.float8_e4m3fn else config.dtype ) rank_tokens = ( torch.randn((config.m, config.k), device="cuda", dtype=token_dtype) / 10 ) rank_token_scales = None topk = torch.randint( low=0, high=config.num_experts, size=(config.m, config.topk), device="cuda" ).to(dtype=torch.int64) topk_weights = torch.randn(topk.shape, dtype=torch.float32, device="cuda") return TestTensors( rank_tokens=rank_tokens, rank_token_scales=rank_token_scales, topk=topk, topk_weights=topk_weights, config=config, ) def make_modular_kernel( pg: ProcessGroup, pgi: ProcessGroupInfo, low_latency_mode: bool, hidden_size: int, dp_size: int, num_experts: int, num_local_experts: int, q_dtype: torch.dtype | None, use_fp8_dispatch: bool, quant_config: FusedMoEQuantConfig, ) -> FusedMoEModularKernel: ht_args: DeepEPHTArgs | None = None ll_args: DeepEPLLArgs | None = None if low_latency_mode: ll_args = DeepEPLLArgs( max_tokens_per_rank=MAX_TOKENS_PER_RANK, hidden_size=hidden_size, num_experts=num_experts, use_fp8_dispatch=use_fp8_dispatch, ) else: assert not use_fp8_dispatch, ( "FP8 Dispatch is valid only for low-latency kernels" ) ht_args = DeepEPHTArgs(num_local_experts=num_local_experts) a2a: DeepEPHTPrepareAndFinalize | DeepEPLLPrepareAndFinalize = make_deepep_a2a( pg=pg, pgi=pgi, dp_size=dp_size, q_dtype=q_dtype, block_shape=None, deepep_ht_args=ht_args, deepep_ll_args=ll_args, ) num_dispatchers = pgi.world_size // dp_size if low_latency_mode: assert not quant_config.per_act_token_quant, "not supported in ll mode" fused_experts = BatchedTritonExperts( max_num_tokens=MAX_TOKENS_PER_RANK, num_dispatchers=num_dispatchers, quant_config=quant_config, ) else: fused_experts = TritonExperts(quant_config=quant_config) mk = FusedMoEModularKernel(prepare_finalize=a2a, fused_experts=fused_experts) return mk def deep_ep_moe_impl( pg: ProcessGroup, pgi: ProcessGroupInfo, low_latency_mode: bool, dp_size: 
int, test_tensors: TestTensors, w1: torch.Tensor, w2: torch.Tensor, w1_scale: torch.Tensor | None, w2_scale: torch.Tensor | None, num_experts: int, use_fp8_dispatch: bool, per_act_token_quant: bool, ) -> torch.Tensor: num_local_experts = w1.size(0) def build_expert_map(): num_local_experts = w1.size(0) expert_map = torch.full((num_experts,), fill_value=-1, dtype=torch.int32) s = pgi.rank * num_local_experts e = s + num_local_experts expert_map[s:e] = torch.tensor(list(range(num_local_experts))) return expert_map.to(device=torch.cuda.current_device(), dtype=torch.int32) hidden_size = test_tensors.rank_tokens.size(1) is_quantized = w1.dtype == torch.float8_e4m3fn q_dtype = None if is_quantized: q_dtype = torch.float8_e4m3fn out_hidden_states = torch.empty_like(test_tensors.rank_tokens) total_num_tokens = test_tensors.rank_tokens.size(0) def process_chunk(chunk_start, chunk_end, skip_result_store=False): rank_tokens_chunk = test_tensors.rank_tokens[chunk_start:chunk_end] topk_weights_chunk = test_tensors.topk_weights[chunk_start:chunk_end] topk_chunk = test_tensors.topk[chunk_start:chunk_end] rank_token_scales_chunk = test_tensors.rank_token_scales if ( rank_token_scales_chunk is not None and rank_token_scales_chunk.size(0) == total_num_tokens ): # per act token rank_token_scales_chunk = rank_token_scales_chunk[chunk_start:chunk_end] quant_config = FusedMoEQuantConfig.make( q_dtype, w1_scale=w1_scale, w2_scale=w2_scale, per_act_token_quant=per_act_token_quant, a1_scale=rank_token_scales_chunk, ) # Make modular kernel mk: FusedMoEModularKernel = make_modular_kernel( pg, pgi, low_latency_mode, hidden_size, dp_size, num_experts, num_local_experts, q_dtype, use_fp8_dispatch, quant_config, ) out = mk.forward( hidden_states=rank_tokens_chunk, w1=w1, w2=w2, topk_weights=topk_weights_chunk, topk_ids=topk_chunk, inplace=False, activation="silu", global_num_experts=num_experts, expert_map=build_expert_map(), apply_router_weight_on_input=False, ) if not skip_result_store: 
out_hidden_states[chunk_start:chunk_end, :].copy_(out, non_blocking=True) max_num_tokens_per_dp = ( MAX_TOKENS_PER_RANK if low_latency_mode else total_num_tokens ) for chunk_start_ in range(0, total_num_tokens, max_num_tokens_per_dp): chunk_start = chunk_start_ chunk_end = min(chunk_start + max_num_tokens_per_dp, total_num_tokens) # clamp start and end chunk_start = min(chunk_start, total_num_tokens - 1) chunk_end = min(chunk_end, total_num_tokens) process_chunk( chunk_start, chunk_end, skip_result_store=chunk_start_ >= total_num_tokens ) return out_hidden_states def torch_moe_impl( test_tensors: TestTensors, w1: torch.Tensor, w2: torch.Tensor, w1_scale: torch.Tensor | None, w2_scale: torch.Tensor | None, using_fp8_dispatch: bool, per_act_token_quant: bool, ): a, topk_ids, topk_weights = ( test_tensors.rank_tokens, test_tensors.topk, test_tensors.topk_weights, ) if using_fp8_dispatch: # The DeepEP implementation is requested to dispatch using FP8. # For numerical stability for testing, emulate the fp8 dispatch by # blockwise quant and de-quant. 
assert not per_act_token_quant a = test_tensors.rank_tokens aq, aq_scale = per_token_group_quant_fp8(a, 128, use_ue8m0=False) a = ( (aq.view(-1, 128).to(torch.float32) * aq_scale.view(-1, 1)) .view(a.shape) .to(a.dtype) ) is_quantized = w1.dtype == torch.float8_e4m3fn a_dtype = a.dtype if is_quantized: w1 = w1.to(dtype=torch.float32) * w1_scale w2 = w2.to(dtype=torch.float32) * w2_scale a = a.to(dtype=torch.float32) m, _ = a.shape topk = topk_ids.size(1) out = torch.zeros_like(a) for i in range(m): a_i = a[i] o_i = out[i] for j in range(topk): e = topk_ids[i][j] e_w = topk_weights[i][j] w1_e = w1[e] w2_e = w2[e] o_i += ( SiluAndMul()(a_i @ w1_e.transpose(0, 1)) @ w2_e.transpose(0, 1) ) * e_w if is_quantized: out = out.to(dtype=a_dtype) return out def _deep_ep_moe( pgi: ProcessGroupInfo, low_latency_mode: bool, dp_size: int, config: TestConfig, w1: torch.Tensor, w2: torch.Tensor, w1_scale: torch.Tensor | None, w2_scale: torch.Tensor | None, use_fp8_dispatch: bool, per_act_token_quant: bool, ): device = torch.device(f"cuda:{pgi.local_rank}") init_workspace_manager(device) if not low_latency_mode: assert not use_fp8_dispatch, ( "FP8 dispatch interface is available only in low-latency mode" ) is_quantized = w1.dtype == torch.float8_e4m3fn w1 = w1.to(device=torch.cuda.current_device()) w2 = w2.to(device=torch.cuda.current_device()) if is_quantized: w1_scale = w1_scale.to( # type: ignore device=torch.cuda.current_device() ) w2_scale = w2_scale.to( # type: ignore device=torch.cuda.current_device() ) pg = torch.distributed.new_group(list(range(pgi.world_size))) test_tensors = TestTensors.make(config, low_latency_mode) with set_current_vllm_config(VllmConfig()): # Reference torch_combined = torch_moe_impl( test_tensors, w1, w2, w1_scale, w2_scale, use_fp8_dispatch, per_act_token_quant, ) # Splice experts for this rank. 
num_local_experts = config.num_experts // pgi.world_size e_start = num_local_experts * pgi.rank e_end = e_start + num_local_experts w1_ep = w1[e_start:e_end] w2_ep = w2[e_start:e_end] w1_scale_ep, w2_scale_ep = None, None if is_quantized: w1_scale_ep = w1_scale[e_start:e_end] # type: ignore w2_scale_ep = w2_scale[e_start:e_end] # type: ignore deepep_combined = deep_ep_moe_impl( pg, pgi, low_latency_mode, dp_size, test_tensors, w1_ep, w2_ep, w1_scale_ep, w2_scale_ep, config.num_experts, use_fp8_dispatch, per_act_token_quant, ) torch.testing.assert_close( torch_combined, deepep_combined, atol=6e-2, rtol=6e-2, ) MNKs = [ (1, 128, 128), (2, 128, 512), (3, 1024, 2048), (32, 128, 1024), (45, 512, 2048), (64, 1024, 1024), (222, 1024, 2048), ] DTYPES = [torch.bfloat16, torch.float8_e4m3fn] @pytest.mark.parametrize("dtype", DTYPES) @pytest.mark.parametrize("m,n,k", MNKs) @pytest.mark.parametrize("num_experts", [32]) @pytest.mark.parametrize("topk", [6]) @pytest.mark.parametrize("world_dp_size", [(2, 1)]) @pytest.mark.parametrize("per_act_token_quant", [False, True]) @multi_gpu_test(num_gpus=2) @requires_deep_ep def test_deep_ep_moe( dtype: torch.dtype, m: int, n: int, k: int, num_experts: int, topk: int, world_dp_size: tuple[int, int], per_act_token_quant: bool, workspace_init, ): low_latency_mode = False use_fp8_dispatch = False current_platform.seed_everything(7) world_size, dp_size = world_dp_size config = TestConfig(dtype=dtype, topk=topk, m=m, k=k, n=n, num_experts=num_experts) w1, w2, w1_scale, w2_scale = make_weights(num_experts, n, k, dtype) parallel_launch( world_size, _deep_ep_moe, low_latency_mode, dp_size, config, w1, w2, w1_scale, w2_scale, use_fp8_dispatch, per_act_token_quant, ) MNKs = [ (1, 128, 2560), (2, 128, 2560), (3, 1024, 2560), (32, 128, 2560), (45, 512, 2560), (64, 1024, 2560), (222, 1024, 2560), ] DTYPES = [torch.float8_e4m3fn, torch.bfloat16] USE_FP8_DISPATCH = [True, False] @pytest.mark.parametrize("dtype", DTYPES) 
@pytest.mark.parametrize("m,n,k", MNKs) @pytest.mark.parametrize("num_experts", [32]) @pytest.mark.parametrize("topk", [6]) @pytest.mark.parametrize("world_dp_size", [(2, 1)]) @pytest.mark.parametrize("use_fp8_dispatch", USE_FP8_DISPATCH) @multi_gpu_test(num_gpus=2) @requires_deep_ep def test_low_latency_deep_ep_moe( dtype: torch.dtype, m: int, n: int, k: int, num_experts: int, topk: int, world_dp_size: tuple[int, int], use_fp8_dispatch: bool, workspace_init, ): low_latency_mode = True if low_latency_mode and k not in DeepEPLLPrepareAndFinalize.SUPPORTED_HIDDEN_SIZES: pytest.skip( f"Skipping test as hidden size {k} is not in list of supported " f"hidden sizes {DeepEPLLPrepareAndFinalize.SUPPORTED_HIDDEN_SIZES}" ) current_platform.seed_everything(7) world_size, dp_size = world_dp_size config = TestConfig(dtype=dtype, topk=topk, m=m, k=k, n=n, num_experts=num_experts) w1, w2, w1_scale, w2_scale = make_weights(num_experts, n, k, dtype) parallel_launch( world_size, _deep_ep_moe, low_latency_mode, dp_size, config, w1, w2, w1_scale, w2_scale, use_fp8_dispatch, False, )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/moe/test_pplx_cutlass_moe.py
tests/kernels/moe/test_pplx_cutlass_moe.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import pytest import torch from tests.kernels.utils import torch_experts from vllm import _custom_ops as ops from vllm.config import VllmConfig, set_current_vllm_config from vllm.model_executor.layers.fused_moe.config import fp8_w8a8_moe_quant_config from vllm.model_executor.layers.fused_moe.cutlass_moe import CutlassBatchedExpertsFp8 from vllm.model_executor.layers.fused_moe.fused_moe import fused_topk from vllm.model_executor.layers.fused_moe.modular_kernel import FusedMoEModularKernel from vllm.platforms import current_platform from vllm.utils.math_utils import cdiv from ...utils import multi_gpu_test from .parallel_utils import ProcessGroupInfo, parallel_launch try: from pplx_kernels import AllToAll from pplx_kernels.nvshmem import ( nvshmem_alloc_empty_unique_id, nvshmem_finalize, nvshmem_get_unique_id, nvshmem_init, ) has_pplx = True except ImportError: has_pplx = False requires_pplx = pytest.mark.skipif( not has_pplx, reason="Requires PPLX kernels", ) NUM_EXPERTS = [40, 64] TOP_KS = [6, 8] def rank_chunk(num, r, w): rem = num % w return (num // w) + (1 if r < rem else 0) def chunk_by_rank(t, r, w): num = t.shape[0] chunk = rank_chunk(num, r, w) rem = num % w if rem == 0 or r < rem: return t[(r * chunk) : (r + 1) * chunk].contiguous() else: long_chunks = (num // w + 1) * rem short_chunks = (r - rem) * chunk start = long_chunks + short_chunks return t[start : start + chunk].contiguous() def pplx_cutlass_moe( pgi: ProcessGroupInfo, dp_size: int, a: torch.Tensor, w1: torch.Tensor, w2: torch.Tensor, w1_scale: torch.Tensor, w2_scale: torch.Tensor, topk_weights: torch.Tensor, topk_ids: torch.Tensor, a1_scale: torch.Tensor, out_dtype, per_act_token: bool, per_out_ch: bool, group_name: str | None, ): from vllm.model_executor.layers.fused_moe.pplx_prepare_finalize import ( PplxPrepareAndFinalize, ) assert torch.cuda.current_device() == pgi.local_rank num_tokens, 
hidden_dim = a.shape intermediate_dim = w2.shape[2] num_experts = w1.shape[0] block_size = hidden_dim # TODO support more cases device = pgi.device rank = pgi.rank world_size = pgi.world_size rank_num_tokens = rank_chunk(num_tokens, rank, world_size) max_num_tokens = rank_chunk(num_tokens, 0, world_size) topk = topk_ids.shape[1] if block_size == hidden_dim: scale_elems = 4 # hack to circumvent pplx data format requirements else: scale_elems = (hidden_dim + block_size - 1) // block_size args = dict( max_num_tokens=max_num_tokens, num_experts=num_experts, experts_per_token=topk, rank=rank, world_size=world_size, dp_size=dp_size, hidden_dim=hidden_dim, hidden_dim_bytes=hidden_dim, # because a.dtype.itemsize == 1 hidden_dim_scale_bytes=scale_elems * torch.float32.itemsize, ) if group_name is None: ata = AllToAll.internode(**args) else: args["group_name"] = group_name ata = AllToAll.intranode(**args) w1 = w1.to(device) w2 = w2.to(device) w1_scale = w1_scale.to(device) w2_scale = w2_scale.to(device) a1_scale = a1_scale.to(device) assert num_experts % world_size == 0 num_local_experts = cdiv(num_experts, world_size) num_dispatchers = pgi.world_size // dp_size prepare_finalize = PplxPrepareAndFinalize( ata, max_num_tokens=max_num_tokens, num_local_experts=num_local_experts, num_dispatchers=num_dispatchers, ) ab_strides1 = torch.full( (num_local_experts,), hidden_dim, device="cuda", dtype=torch.int64 ) ab_strides2 = torch.full( (num_local_experts,), intermediate_dim, device="cuda", dtype=torch.int64 ) c_strides1 = torch.full( (num_local_experts,), 2 * intermediate_dim, device="cuda", dtype=torch.int64 ) c_strides2 = torch.full( (num_local_experts,), hidden_dim, device="cuda", dtype=torch.int64 ) experts = CutlassBatchedExpertsFp8( num_local_experts, num_dispatchers, out_dtype, ab_strides1, ab_strides2, c_strides1, c_strides2, fp8_w8a8_moe_quant_config( per_act_token_quant=per_act_token, per_out_ch_quant=per_out_ch, w1_scale=chunk_by_rank(w1_scale, rank, world_size), 
w2_scale=chunk_by_rank(w2_scale, rank, world_size), a1_scale=chunk_by_rank(a1_scale, rank, world_size) if per_act_token else a1_scale[rank], ), ) fused_cutlass_experts = FusedMoEModularKernel( prepare_finalize, experts, ) a_chunk = chunk_by_rank(a, rank, world_size).to(device) chunk_topk_weight = chunk_by_rank(topk_weights, rank, world_size).to(device) chunk_topk_ids = ( chunk_by_rank(topk_ids, rank, world_size).to(torch.uint32).to(device) ) out = fused_cutlass_experts( a_chunk, chunk_by_rank(w1, rank, world_size), chunk_by_rank(w2, rank, world_size), chunk_topk_weight, chunk_topk_ids, global_num_experts=num_experts, expert_map=None, # TODO ) torch.cuda.synchronize() ata.destroy() return out[:rank_num_tokens] vllm_config = VllmConfig() def _pplx_moe( pgi: ProcessGroupInfo, dp_size: int, a: torch.Tensor, w1: torch.Tensor, w2: torch.Tensor, w1_scale: torch.Tensor, w2_scale: torch.Tensor, topk_weights: torch.Tensor, topk_ids: torch.Tensor, a1_scale: torch.Tensor, out_dtype, a_full: torch.Tensor, w1_full: torch.Tensor, w2_full: torch.Tensor, per_act_token: bool, per_out_ch: bool, use_internode: bool, ): try: if use_internode: uid = ( nvshmem_get_unique_id() if pgi.rank == 0 else nvshmem_alloc_empty_unique_id() ) torch.distributed.broadcast(uid, src=0) nvshmem_init(uid, pgi.rank, pgi.world_size) else: group_ranks = list(range(pgi.world_size)) cpu_group = torch.distributed.new_group(group_ranks, backend="gloo") group_name = cpu_group.group_name with set_current_vllm_config(vllm_config): torch_output = torch_experts( a_full, w1_full, w2_full, topk_weights, topk_ids ) pplx_output = pplx_cutlass_moe( pgi, dp_size, a, w1, w2, w1_scale, w2_scale, topk_weights, topk_ids, a1_scale, out_dtype, per_act_token, per_out_ch, group_name, ) torch_output = chunk_by_rank(torch_output, pgi.rank, pgi.world_size).to( pplx_output.device ) # Uncomment if more debugging is needed # print("PPLX OUT:", pplx_output) # print("TORCH OUT:", torch_output) torch.testing.assert_close(pplx_output, 
torch_output, atol=0.05, rtol=0) finally: if use_internode: nvshmem_finalize() @pytest.mark.parametrize("m", [2, 224]) @pytest.mark.parametrize("n", [3072]) @pytest.mark.parametrize("k", [1536]) @pytest.mark.parametrize("e", NUM_EXPERTS) @pytest.mark.parametrize("topk", TOP_KS) @pytest.mark.parametrize("per_act_token", [True, False]) @pytest.mark.parametrize("per_out_ch", [True, False]) @pytest.mark.parametrize("world_dp_size", [[2, 1]]) # , [4, 2]]) @pytest.mark.parametrize("use_internode", [False]) @multi_gpu_test(num_gpus=2) @pytest.mark.skipif( (lambda x: x is None or not ops.cutlass_group_gemm_supported(x.to_int()))( current_platform.get_device_capability() ), reason="Grouped gemm is not supported on this GPU type.", ) @requires_pplx def test_cutlass_moe_pplx( m: int, n: int, k: int, e: int, topk: int, per_act_token: bool, per_out_ch: bool, world_dp_size: tuple[int, int], use_internode: bool, ): current_platform.seed_everything(7) with set_current_vllm_config(vllm_config): dtype = torch.half a = torch.randn((m, k), device="cuda", dtype=dtype) / 10.0 w1 = torch.randn((e, 2 * n, k), device="cuda", dtype=dtype) / 10.0 w2 = torch.randn((e, k, n), device="cuda", dtype=dtype) / 10.0 n_b_scales = 2 * n if per_out_ch else 1 k_b_scales = k if per_out_ch else 1 w1_q = torch.empty((e, 2 * n, k), device="cuda", dtype=torch.float8_e4m3fn) w2_q = torch.empty((e, k, n), device="cuda", dtype=torch.float8_e4m3fn) w1_scale = torch.empty((e, n_b_scales, 1), device="cuda", dtype=torch.float32) w2_scale = torch.empty((e, k_b_scales, 1), device="cuda", dtype=torch.float32) for expert in range(e): w1_q[expert], w1_scale[expert] = ops.scaled_fp8_quant( w1[expert], use_per_token_if_dynamic=per_out_ch ) w2_q[expert], w2_scale[expert] = ops.scaled_fp8_quant( w2[expert], use_per_token_if_dynamic=per_out_ch ) w1_d = torch.empty_like(w1) w2_d = torch.empty_like(w2) for expert in range(e): w1_d[expert] = (w1_q[expert].float() * w1_scale[expert]).half() w2_d[expert] = (w2_q[expert].float() * 
w2_scale[expert]).half() score = torch.randn((m, e), device="cuda", dtype=dtype) topk_weights, topk_ids, _ = fused_topk(a, score, topk, renormalize=False) world_size, dp_size = world_dp_size a_scale1 = ( torch.randn( (m if per_act_token else 1, 1), device="cuda", dtype=torch.float32 ) / 10.0 ) if not per_act_token: a_scale1 = a_scale1.repeat(world_size, 1) parallel_launch( world_size, _pplx_moe, dp_size, a, w1_q, w2_q, w1_scale, w2_scale, topk_weights, topk_ids, a_scale1, dtype, a, w1_d, w2_d, per_act_token, per_out_ch, use_internode, )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/moe/test_modular_oai_triton_moe.py
tests/kernels/moe/test_modular_oai_triton_moe.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ Test modular OAI Triton MoE """ import pytest import torch from vllm.utils.import_utils import has_triton_kernels if not has_triton_kernels(): pytest.skip( "triton_kernels not found, skipping all related tests", allow_module_level=True, ) from triton_kernels.matmul_ogs import FlexCtx, PrecisionConfig from triton_kernels.numerics import InFlexData from triton_kernels.numerics_details.mxfp import downcast_to_mxfp, upcast_from_mxfp from triton_kernels.tensor import FP4, convert_layout, wrap_torch_tensor from triton_kernels.tensor_details import layout from triton_kernels.testing import assert_close from vllm.config import VllmConfig, set_current_vllm_config from vllm.model_executor.layers.fused_moe.config import mxfp4_w4a16_moe_quant_config from vllm.model_executor.layers.fused_moe.gpt_oss_triton_kernels_moe import ( OAITritonExperts, UnfusedOAITritonExperts, ) from vllm.model_executor.layers.fused_moe.modular_kernel import FusedMoEModularKernel from vllm.model_executor.layers.fused_moe.prepare_finalize import ( MoEPrepareAndFinalizeNoEP, ) from vllm.model_executor.layers.utils import shuffle_weight from vllm.platforms import current_platform MNK = [ (1, 512, 384), (1, 2880, 2880), (2, 512, 384), (2, 2880, 2880), (16, 2880, 2880), ] def unshuffle_weight(w: torch.Tensor): first = w[..., ::2] second = w[..., 1::2] return torch.concat((first, second), dim=-1) def make_weights(dtype, k, n, e): w1 = torch.randn((e, k, 2 * n), dtype=dtype, device="cuda") w1_bias = torch.randn((e, 2 * n), dtype=dtype, device="cuda") w2 = torch.randn((e, n, k), dtype=dtype, device="cuda") w2_bias = torch.randn((e, k), dtype=dtype, device="cuda") w1_tri = w1.clone() w2_tri = w2.clone() w1_bias_tri = w1_bias.clone() w2_bias_tri = w2_bias.clone() w1_bias_tri = w1_bias_tri.to(torch.float32) w2_bias_tri = w2_bias_tri.to(torch.float32) # shuffle weights w1_tri = shuffle_weight(w1_tri) 
w1_bias_tri = shuffle_weight(w1_bias_tri) # quant triton_weights w1_tri, w1_scale_tri = downcast_to_mxfp(w1_tri, torch.uint8, axis=1) w1 = upcast_from_mxfp(w1_tri, w1_scale_tri, dtype, axis=1) w1 = unshuffle_weight(w1) w2_tri, w2_scale_tri = downcast_to_mxfp(w2_tri, torch.uint8, axis=1) w2 = upcast_from_mxfp(w2_tri, w2_scale_tri, dtype, axis=1) num_warps = 8 w_layout, w_layout_opts = layout.make_default_matmul_mxfp4_w_layout(mx_axis=1) w_scale_layout, w_scale_layout_opts = ( layout.make_default_matmul_mxfp4_w_scale_layout(mx_axis=1, num_warps=num_warps) ) w1_tri = convert_layout(wrap_torch_tensor(w1_tri, FP4), w_layout, **w_layout_opts) w1_scale_tri = convert_layout( wrap_torch_tensor(w1_scale_tri), w_scale_layout, **w_scale_layout_opts, ) w2_tri = convert_layout(wrap_torch_tensor(w2_tri, FP4), w_layout, **w_layout_opts) w2_scale_tri = convert_layout( wrap_torch_tensor(w2_scale_tri), w_scale_layout, **w_scale_layout_opts, ) w1_precision_config = PrecisionConfig( weight_scale=w1_scale_tri, flex_ctx=FlexCtx(rhs_data=InFlexData()) ) w2_precision_config = PrecisionConfig( weight_scale=w2_scale_tri, flex_ctx=FlexCtx(rhs_data=InFlexData()) ) return ( w1, w2, w1_bias, w2_bias, w1_tri, w2_tri, w1_bias_tri, w2_bias_tri, w1_precision_config, w2_precision_config, ) def swiglu(x, alpha: float = 1.702, limit: float = 1.0): # Note we add an extra bias of 1 to the linear layer x_glu, x_linear = torch.chunk(x, 2, dim=-1) if limit is not None: x_glu = x_glu.clamp(max=limit) out_glu = x_glu * torch.sigmoid(alpha * x_glu) if limit is not None: x_linear = x_linear.clamp(min=-limit, max=limit) return out_glu * (x_linear + 1) def torch_moe_impl( hidden_states: torch.Tensor, # (M, K) w1: torch.Tensor, # (E, K, 2N) w2: torch.Tensor, # (E, N, K) w1_bias: torch.Tensor, # (E, 2N) w2_bias: torch.Tensor, # (E, K) topk_weights: torch.Tensor, # (M, topk) topk_ids: torch.Tensor, # (M, topk) ): w1 = w1[topk_ids, ...] w1_bias = w1_bias[topk_ids, ...] 
hidden_states = torch.einsum("bekc,bk->bec", w1, hidden_states) + w1_bias hidden_states = swiglu(hidden_states, limit=7) w2 = w2[topk_ids, ...] w2_bias = w2_bias[topk_ids, ...] hidden_states = torch.einsum("bekc,bek->bec", w2, hidden_states) + w2_bias # Weighted sum of experts hidden_states = torch.einsum("bec,be->bc", hidden_states, topk_weights) return hidden_states def oai_triton_moe_impl( x: torch.Tensor, w1: torch.Tensor, w2: torch.Tensor, w1_scale: "PrecisionConfig", w2_scale: "PrecisionConfig", w1_bias: torch.Tensor | None, w2_bias: torch.Tensor | None, num_experts: int, topk_weights: torch.Tensor, topk_ids: torch.Tensor, unfused: bool = False, ) -> torch.Tensor: quant_config = mxfp4_w4a16_moe_quant_config( w1_bias=w1_bias, w2_bias=w2_bias, w1_scale=w1_scale, w2_scale=w2_scale, ) if unfused: fused_experts = UnfusedOAITritonExperts(quant_config) else: fused_experts = OAITritonExperts(quant_config) mk = FusedMoEModularKernel(MoEPrepareAndFinalizeNoEP(), fused_experts) return mk.forward( hidden_states=x, w1=w1, w2=w2, topk_weights=topk_weights, topk_ids=topk_ids, inplace=True, activation="swigluoai", global_num_experts=num_experts, expert_map=None, apply_router_weight_on_input=False, ) @pytest.mark.skipif( not current_platform.is_cuda(), reason="This test is skipped on non-CUDA platform." 
) @pytest.mark.parametrize("dtype", [torch.bfloat16]) @pytest.mark.parametrize("m,n,k", MNK) @pytest.mark.parametrize("num_experts", [32, 128]) @pytest.mark.parametrize("topk", [4]) @pytest.mark.parametrize("unfused", [True, False]) def test_oai_triton_moe( dtype: torch.dtype, m: int, n: int, k: int, num_experts: int, topk: int, unfused: bool, workspace_init, ): current_platform.seed_everything(0) ( w1, w2, w1_bias, w2_bias, w1_tri, w2_tri, w1_bias_tri, w2_bias_tri, w1_precision_config, w2_precision_config, ) = make_weights(dtype, k, n, num_experts) x = torch.randn((m, k), dtype=dtype, device="cuda") router_logits = torch.randn(m, num_experts, device="cuda", dtype=dtype) topk_weights, topk_ids = torch.topk(router_logits, k=topk, dim=-1, sorted=True) topk_weights = torch.nn.functional.softmax(topk_weights, dim=-1) with set_current_vllm_config(VllmConfig()): out_ref = torch_moe_impl(x, w1, w2, w1_bias, w2_bias, topk_weights, topk_ids) out = oai_triton_moe_impl( x, w1_tri, w2_tri, w1_precision_config, w2_precision_config, w1_bias_tri, w2_bias_tri, num_experts, topk_weights, topk_ids, unfused, ) assert_close(ref=out_ref, tri=out, maxtol=0.025, rmstol=0.005)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/moe/test_block_int8.py
tests/kernels/moe/test_block_int8.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import pytest import torch from tests.kernels.moe.utils import make_test_quant_config from tests.kernels.quant_utils import ( native_per_token_group_quant_int8, native_w8a8_block_matmul, ) from vllm.config import VllmConfig, set_current_vllm_config from vllm.model_executor.layers.activation import SiluAndMul from vllm.model_executor.layers.fused_moe import fused_experts, fused_topk from vllm.platforms import current_platform if current_platform.get_device_capability() < (7, 0): pytest.skip("INT8 Triton requires CUDA 7.0 or higher", allow_module_level=True) vllm_config = VllmConfig() DTYPES = [torch.bfloat16] MNK_FACTORS = [ (1, 128, 128), (1, 128, 7168), (1, 1024, 7168), (1, 4096, 512), (1, 4096, 7168), (33, 512, 512), (33, 128, 7168), (33, 1024, 7168), (33, 4096, 128), (33, 4096, 7168), (128, 128, 128), (128, 1024, 7168), (128, 4096, 512), (128, 4096, 7168), (222, 512, 512), (222, 1024, 7168), (222, 4096, 7168), (2048, 128, 128), (2048, 1024, 7168), (2048, 4096, 4096), ] E = [8, 24] TOP_KS = [2, 6] # BLOCK_SIZE = [[64, 64], [64, 128], [128, 64], [128, 128]] BLOCK_SIZE = [[128, 128]] SEEDS = [0] # For test def torch_w8a8_block_int8_moe(a, w1, w2, w1_s, w2_s, score, topk, block_shape): """This function performs fused moe with block-wise quantization using native torch.""" B, D = a.shape a = a.view(B, -1, D).repeat(1, topk, 1).reshape(-1, D) out = torch.zeros(B * topk, w2.shape[1], dtype=a.dtype, device=a.device) score = torch.softmax(score, dim=-1, dtype=torch.float32) topk_weight, topk_ids = torch.topk(score, topk) topk_weight = topk_weight.view(-1) topk_ids = topk_ids.view(-1) _, block_k = block_shape[0], block_shape[1] a_q, a_s = native_per_token_group_quant_int8(a, block_k) for i in range(w1.shape[0]): mask = topk_ids == i if mask.sum(): inter_out = native_w8a8_block_matmul( a_q[mask], w1[i], a_s[mask], w1_s[i], block_shape, output_dtype=a.dtype ) act_out = 
SiluAndMul().forward_native(inter_out) act_out_q, act_out_s = native_per_token_group_quant_int8(act_out, block_k) act_out = act_out.to(torch.float32) out[mask] = native_w8a8_block_matmul( act_out_q, w2[i], act_out_s, w2_s[i], block_shape, output_dtype=a.dtype ) return ( out.view(B, -1, w2.shape[1]) * topk_weight.view(B, -1, 1).to(out.dtype) ).sum(dim=1) @pytest.fixture(autouse=True, scope="module") def setup_cuda(): """Sets the default CUDA device for all tests in this module.""" torch.set_default_device("cuda") @pytest.mark.parametrize(("M", "N", "K"), MNK_FACTORS) @pytest.mark.parametrize("E", E) @pytest.mark.parametrize("topk", TOP_KS) @pytest.mark.parametrize("block_size", BLOCK_SIZE) @pytest.mark.parametrize("dtype", DTYPES) @pytest.mark.parametrize("seed", SEEDS) @torch.inference_mode() def test_w8a8_block_int8_fused_moe(M, N, K, E, topk, block_size, dtype, seed): """Tests the fused_moe kernel with W8A8 INT8 block quantization against a native torch reference.""" torch.manual_seed(seed) a = torch.randn((M, K), dtype=dtype) / 10 score = torch.randn((M, E), dtype=dtype) topk_weights, topk_ids, _ = fused_topk(a, score.float(), topk, False) w1, w2, quant_config = make_test_quant_config( E, N, K, dtype, quant_dtype=torch.int8, per_act_token_quant=False, block_shape=block_size, ) # Set the context to avoid lots of warning spam. with set_current_vllm_config(vllm_config): out = fused_experts( a, w1, w2, topk_weights, topk_ids, quant_config=quant_config ) ref_out = torch_w8a8_block_int8_moe( a, w1, w2, quant_config.w1_scale, quant_config.w2_scale, score, topk, block_size, ) # Check results torch.testing.assert_close(out, ref_out, atol=0.065, rtol=0.065)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/moe/test_modular_kernel_combinations.py
tests/kernels/moe/test_modular_kernel_combinations.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import copy import textwrap import traceback from itertools import product from typing import Any import pytest import torch import vllm.model_executor.layers.fused_moe.modular_kernel as mk from vllm.config import VllmConfig, set_current_vllm_config from vllm.platforms import current_platform from vllm.utils.flashinfer import has_flashinfer_cutlass_fused_moe from vllm.utils.import_utils import has_deep_ep, has_deep_gemm, has_pplx from vllm.utils.torch_utils import cuda_device_count_stateless from vllm.v1.worker.workspace import init_workspace_manager from .modular_kernel_tools.common import ( Config, RankTensors, WeightTensors, reference_moe_impl, run_modular_kernel, ) from .modular_kernel_tools.mk_objects import ( MK_FUSED_EXPERT_TYPES, MK_MULTI_GPU_PREPARE_FINALIZE_TYPES, MK_QUANT_CONFIGS, MK_SINGLE_GPU_PREPARE_FINALIZE_TYPES, TestMoEQuantConfig, expert_info, ) from .modular_kernel_tools.parallel_utils import ( ProcessGroupInfo, parallel_launch_with_config, ) has_any_multi_gpu_package = ( has_deep_ep() or has_deep_gemm() or has_pplx() or has_flashinfer_cutlass_fused_moe() ) meets_multi_gpu_requirements = pytest.mark.skipif( not has_any_multi_gpu_package, reason="Requires deep_ep or deep_gemm or pplx or flashinfer packages", ) if current_platform.is_fp8_fnuz(): pytest.skip( "Tests in this file require float8_e4m3fn and platform does not support", allow_module_level=True, ) def format_result(verbose, msg, ex=None): if ex is not None: x = str(ex) newx = x.strip(" \n\t")[:16] if len(newx) < len(x): newx = newx + " ..." 
prefix = "E\t" print(f"{textwrap.indent(traceback.format_exc(), prefix)}") print(f"FAILED {msg} - {newx}\n") elif verbose: print(f"PASSED {msg}") else: print(".", end="") def rank_worker( pgi: ProcessGroupInfo, vllm_config: VllmConfig, cpu_group, base_config: Config, weights: WeightTensors, verbose: bool, ): # Initialize workspace manager in child process device = torch.device(f"cuda:{pgi.local_rank}") init_workspace_manager(device) current_platform.seed_everything(pgi.rank) # sanity check from vllm import envs if base_config.fused_moe_chunk_size is not None: assert base_config.fused_moe_chunk_size == envs.VLLM_FUSED_MOE_CHUNK_SIZE # get weights to this device weights.to_current_device() Ms = base_config.Ms assert isinstance(Ms, list) TOPKs = base_config.topks assert isinstance(TOPKs, list) exceptions = [] count = 0 for m, topk in product(Ms, TOPKs): # override m and topk config = copy.deepcopy(base_config) config.Ms = m config.topks = topk try: print(f"Running[{pgi.rank}]: m={m}, topk={topk} ...") count = count + 1 # inputs for rank rank_tensors = RankTensors.make(config, pgi) # modular kernel out mk_out = run_modular_kernel(pgi, vllm_config, config, weights, rank_tensors) with set_current_vllm_config(vllm_config): ref_out = reference_moe_impl(config, weights, rank_tensors) if config.quant_dtype == "nvfp4": atol = 1e-1 if config.K < 4096 else 2e-1 rtol = 1e-1 if config.K < 4096 else 2e-1 else: atol = 3e-2 rtol = 3e-2 torch.testing.assert_close(ref_out, mk_out, atol=atol, rtol=rtol) format_result(verbose, config.describe()) except Exception as ex: format_result(verbose, config.describe(), ex) exceptions.append(ex) if len(exceptions) > 0: raise RuntimeError( f"{len(exceptions)} of {count} tests failed in child process, " f"rank={pgi.rank}." 
) else: print(f"{count} of {count} tests passed in child process, rank={pgi.rank}.") def run(config: Config, verbose: bool): assert config.is_valid()[0] assert not is_nyi_config(config) weights: WeightTensors = WeightTensors.make(config) vllm_config, env_dict = config.make_env_data() parallel_launch_with_config( config.world_size, rank_worker, vllm_config, env_dict, config, weights, verbose ) Ms = [32, 64] # hidden sizes, making this too large will cause fp4 tests to fail. # Also needs to be a multiple of 1024 for deep_gemm. Ks = [2048] Ns = [1024] TOPKs = [4, 1] Es = [32] DTYPEs = [torch.bfloat16] FUSED_MOE_CHUNK_SIZEs = [None, 16] def is_nyi_config(config: Config) -> bool: # We know these configs to be legitimate. but still fail. info = expert_info(config.fused_experts_type) if info.needs_matching_quant: # The triton kernels expect both per-act-token-quant and # per-out-ch-quant or neither. unsupported_quant_config = ( config.is_per_act_token_quant + config.is_per_out_ch_quant ) == 1 return unsupported_quant_config return not info.supports_expert_map def generate_valid_test_cases( world_size: int, prepare_finalize_types ) -> list[tuple[Any, ...]]: cases = [] total = 0 for k, n, e, dtype, quant_config, combination, chunk_size in product( Ks, Ns, Es, DTYPEs, MK_QUANT_CONFIGS, product(prepare_finalize_types, MK_FUSED_EXPERT_TYPES), FUSED_MOE_CHUNK_SIZEs, ): total = total + 1 config = Config( Ms=Ms, K=k, N=n, E=e, topks=TOPKs, dtype=dtype, quant_config=quant_config, prepare_finalize_type=combination[0], fused_experts_type=combination[1], fused_moe_chunk_size=chunk_size, world_size=world_size, ) # TODO(bnell): figure out how to get verbose flag here. 
verbose = False # pytestconfig.getoption('verbose') > 0 valid, reason = config.is_valid() if not valid: if verbose: print(f"Test config {config} is not valid: {reason}") continue if is_nyi_config(config): if verbose: print(f"Test config {config} is nyi.") continue cases.append( ( k, n, e, dtype, quant_config, combination[0], combination[1], chunk_size, world_size, ) ) print(f"{len(cases)} of {total} valid configs generated.") return cases @pytest.mark.parametrize( "k,n,e,dtype,quant_config,prepare_finalize_type,fused_experts_type,chunk_size,world_size", generate_valid_test_cases( world_size=2, prepare_finalize_types=MK_MULTI_GPU_PREPARE_FINALIZE_TYPES ), ) @meets_multi_gpu_requirements def test_modular_kernel_combinations_multigpu( k: int, n: int, e: int, dtype: torch.dtype, quant_config: TestMoEQuantConfig | None, prepare_finalize_type: mk.FusedMoEPrepareAndFinalize, fused_experts_type: mk.FusedMoEPermuteExpertsUnpermute, chunk_size: int | None, world_size: int, pytestconfig, ): if cuda_device_count_stateless() < world_size: pytest.skip( f"Not enough GPUs available to run, got " f"{cuda_device_count_stateless()} exepected " f"{world_size}." 
) config = Config( Ms=Ms, K=k, N=n, E=e, topks=TOPKs, dtype=dtype, quant_config=quant_config, prepare_finalize_type=prepare_finalize_type, fused_experts_type=fused_experts_type, fused_moe_chunk_size=chunk_size, world_size=world_size, ) verbosity = pytestconfig.getoption("verbose") run(config, verbosity > 0) @pytest.mark.parametrize( "k,n,e,dtype,quant_config,prepare_finalize_type,fused_experts_type,chunk_size,world_size", generate_valid_test_cases( world_size=1, prepare_finalize_types=MK_SINGLE_GPU_PREPARE_FINALIZE_TYPES ), ) def test_modular_kernel_combinations_singlegpu( k: int, n: int, e: int, dtype: torch.dtype, quant_config: TestMoEQuantConfig | None, prepare_finalize_type: mk.FusedMoEPrepareAndFinalize, fused_experts_type: mk.FusedMoEPermuteExpertsUnpermute, chunk_size: int | None, world_size: int, pytestconfig, workspace_init, ): """Note: float8_e4m3fn is not supported on CUDA architecture < 89, and those tests will be skipped on unsupported hardware.""" config = Config( Ms=Ms, K=k, N=n, E=e, topks=TOPKs, dtype=dtype, quant_config=quant_config, prepare_finalize_type=prepare_finalize_type, fused_experts_type=fused_experts_type, fused_moe_chunk_size=chunk_size, world_size=world_size, ) if ( quant_config is not None and quant_config.quant_dtype == torch.float8_e4m3fn ) and not current_platform.has_device_capability(89): pytest.skip( "Triton limitation: fp8e4nv data type is not supported on CUDA arch < 89" ) verbosity = pytestconfig.getoption("verbose") run(config, verbosity > 0) if __name__ == "__main__": # Ability to test individual PrepareAndFinalize and FusedExperts combination from .modular_kernel_tools.cli_args import make_config, make_config_arg_parser parser = make_config_arg_parser( description=( "Run single prepare-finalize & fused-experts combination test" "Example : python3 -m tests.kernels.moe.test_modular_kernel_combinations " "--pf-type PplxPrepareAndFinalize --experts-type BatchedTritonExperts" ) ) args = parser.parse_args() config = 
make_config(args) run(config, True)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/moe/test_batched_moe.py
tests/kernels/moe/test_batched_moe.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from dataclasses import dataclass import pytest import torch from tests.kernels.moe.utils import ( batched_moe, make_quantized_test_activations, make_test_weights, naive_batched_moe, ) from tests.kernels.quant_utils import native_batched_masked_quant_matmul from tests.kernels.utils import torch_experts from vllm.config import VllmConfig, set_current_vllm_config from vllm.model_executor.layers.fused_moe.fused_batched_moe import ( invoke_moe_batched_triton_kernel, ) from vllm.model_executor.layers.fused_moe.fused_moe import fused_topk from vllm.platforms import current_platform from vllm.triton_utils import tl MNK_FACTORS = [ (1, 128, 128), (1, 512, 512), (1, 1024, 2048), (32, 128, 128), (32, 512, 512), (32, 1024, 2048), (45, 128, 2048), (45, 1024, 128), (64, 512, 512), (64, 1024, 2048), (222, 128, 2048), (222, 1024, 2048), ] NUM_EXPERTS = [8, 64] TOP_KS = [1, 2, 6] DTYPES = [torch.bfloat16] if not current_platform.is_fp8_fnuz(): DTYPES.append(torch.float8_e4m3fn) vllm_config = VllmConfig() @dataclass class BatchedMMConfig: in_dtype: torch.dtype quant_dtype: torch.dtype | None out_dtype: torch.dtype num_experts: int max_tokens_per_expert: int K: int N: int @dataclass class BatchedMMTensors: A: torch.Tensor # [E, max_tokens, K] B: torch.Tensor # [E, K, N] - column major C: torch.Tensor # [E, max_tokens, N] num_expert_tokens: torch.Tensor # [E] @staticmethod def make_tensors(config: BatchedMMConfig): A = ( torch.randn( (config.num_experts, config.max_tokens_per_expert, config.K), device="cuda", dtype=config.in_dtype, ) / 10 ) B = torch.randn( (config.num_experts, config.N, config.K), device="cuda", dtype=config.in_dtype, ) C = torch.zeros( (config.num_experts, config.max_tokens_per_expert, config.N), device="cuda", dtype=config.out_dtype, ) num_expert_tokens = torch.randint( low=0, high=config.max_tokens_per_expert, size=(config.num_experts,), device="cuda", 
dtype=torch.int32, ) return BatchedMMTensors(A, B, C, num_expert_tokens) @pytest.mark.parametrize("num_experts", [8, 32]) @pytest.mark.parametrize("max_tokens_per_expert", [32, 224, 512]) @pytest.mark.parametrize("K", [128, 1024]) @pytest.mark.parametrize("N", [128, 1024]) @pytest.mark.parametrize("dtype", DTYPES) @pytest.mark.parametrize("block_shape", [None, [128, 128]]) @pytest.mark.parametrize("per_act_token_quant", [False, True]) def test_batched_mm( num_experts: int, max_tokens_per_expert: int, K: int, N: int, dtype: torch.dtype, block_shape: list[int] | None, per_act_token_quant: bool, ): """Note: float8_e4m3fn is not supported on CUDA architecture < 89, and those tests will be skipped on unsupported hardware.""" current_platform.seed_everything(7) use_fp8_w8a8 = dtype == torch.float8_e4m3fn if (dtype == torch.float8_e4m3fn) and not current_platform.has_device_capability( 89 ): pytest.skip( "Triton limitation: fp8e4nv data type is not supported on CUDA arch < 89" ) if (per_act_token_quant or block_shape is not None) and not use_fp8_w8a8: pytest.skip("Don't test blocking for non-quantized types.") if per_act_token_quant and block_shape is not None: pytest.skip("Skip illegal quantization test.") if dtype.itemsize == 1: act_dtype = torch.bfloat16 quant_dtype = dtype else: act_dtype = dtype quant_dtype = None num_expert_tokens = torch.randint( low=0, high=max_tokens_per_expert, size=(num_experts,), device="cuda", dtype=torch.int32, ) A, A_q, A_scale = make_quantized_test_activations( num_experts, max_tokens_per_expert, K, in_dtype=act_dtype, quant_dtype=quant_dtype, block_shape=block_shape, per_act_token_quant=per_act_token_quant, ) (B, B_q, B_scale, _), _ = make_test_weights( num_experts, N // 2, K, in_dtype=act_dtype, quant_dtype=quant_dtype, block_shape=block_shape, per_out_ch_quant=per_act_token_quant, ) out_shape = (num_experts, max_tokens_per_expert, N) test_output = torch.zeros(out_shape, dtype=act_dtype, device="cuda") ref_output = torch.zeros(out_shape, 
dtype=act_dtype, device="cuda") q_ref_output = torch.zeros(out_shape, dtype=act_dtype, device="cuda") compute_tl_dtype = { torch.float16: tl.float16, torch.bfloat16: tl.bfloat16, torch.float32: tl.float32, }[test_output.dtype] assert A_q.dtype == B_q.dtype invoke_moe_batched_triton_kernel( A_q, B_q, test_output, num_expert_tokens, compute_tl_dtype, # Quantization data A_scale, B_scale, None, # Quantization schemes use_fp8_w8a8, False, False, config={ "BLOCK_SIZE_M": 16, "BLOCK_SIZE_N": 16, "BLOCK_SIZE_K": 16 if dtype.itemsize > 1 else 32, }, per_act_token_quant=per_act_token_quant, block_shape=block_shape, ) ref_output = native_batched_masked_quant_matmul( A, B, ref_output, num_expert_tokens, ) q_ref_output = native_batched_masked_quant_matmul( A_q, B_q, q_ref_output, num_expert_tokens, A_scale, B_scale, block_shape, per_act_token_quant, ) rtol, atol = { torch.float16: (6e-2, 6e-2), torch.bfloat16: (6e-2, 6e-2), torch.float32: (1e-2, 1e-2), }[test_output.dtype] torch.testing.assert_close(ref_output, q_ref_output, atol=atol, rtol=rtol) torch.testing.assert_close(test_output, q_ref_output, atol=atol, rtol=rtol) @pytest.mark.parametrize(("m", "n", "k"), MNK_FACTORS) @pytest.mark.parametrize("e", NUM_EXPERTS) @pytest.mark.parametrize("topk", TOP_KS) @pytest.mark.parametrize("dtype", DTYPES) @pytest.mark.parametrize("per_act_token_quant", [False, True]) @pytest.mark.parametrize("block_shape", [None, [128, 128]]) @pytest.mark.parametrize("input_scales", [False]) def test_fused_moe_batched_experts( m: int, n: int, k: int, e: int, topk: int, dtype: torch.dtype, per_act_token_quant: bool, block_shape: list[int] | None, input_scales: bool, workspace_init, ): """Note: float8_e4m3fn is not supported on CUDA architecture < 89, and those tests will be skipped on unsupported hardware.""" current_platform.seed_everything(7) use_fp8_w8a8 = dtype == torch.float8_e4m3fn if (dtype == torch.float8_e4m3fn) and not current_platform.has_device_capability( 89 ): pytest.skip( "Triton 
limitation: fp8e4nv data type is not supported on CUDA arch < 89" ) if topk > e: pytest.skip("topk > e") if not use_fp8_w8a8 and (per_act_token_quant or block_shape is not None): pytest.skip("Skip quantization test for non-quantized type") if per_act_token_quant and block_shape is not None: pytest.skip("Skip illegal quantization test.") a = torch.randn((m, k), device="cuda", dtype=torch.bfloat16) / 10 score = torch.randn((m, e), device="cuda", dtype=torch.bfloat16) if dtype.itemsize == 1: act_dtype = torch.bfloat16 quant_dtype = dtype else: act_dtype = dtype quant_dtype = None (w1_16, w1, w1_s, _), (w2_16, w2, w2_s, _) = make_test_weights( e, n, k, block_shape=block_shape, in_dtype=act_dtype, quant_dtype=quant_dtype, per_out_ch_quant=per_act_token_quant, ) if input_scales and quant_dtype is not None: a1_scale = torch.tensor(1, device="cuda", dtype=torch.float32) a2_scale = torch.tensor(1, device="cuda", dtype=torch.float32) else: a1_scale = None a2_scale = None with set_current_vllm_config(vllm_config): topk_weight, topk_ids, _ = fused_topk(a, score, topk, False) baseline_output = torch_experts( a, w1, w2, topk_weight, topk_ids, w1_scale=w1_s, w2_scale=w2_s, a1_scale=a1_scale, a2_scale=a2_scale, quant_dtype=quant_dtype, per_act_token_quant=per_act_token_quant, block_shape=block_shape, ) batched_output = naive_batched_moe( a, w1, w2, topk_weight, topk_ids, w1_scale=w1_s, w2_scale=w2_s, a1_scale=a1_scale, a2_scale=a2_scale, quant_dtype=quant_dtype, per_act_token_quant=per_act_token_quant, block_shape=block_shape, ) triton_output = batched_moe( a, w1, w2, topk_weight, topk_ids, w1_scale=w1_s, w2_scale=w2_s, a1_scale=a1_scale, a2_scale=a2_scale, quant_dtype=quant_dtype, per_act_token_quant=per_act_token_quant, block_shape=block_shape, ) torch.testing.assert_close(batched_output, baseline_output, atol=3e-2, rtol=2e-2) torch.testing.assert_close(triton_output, batched_output, atol=2e-2, rtol=2e-2)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/moe/modular_kernel_tools/make_feature_matrix.py
tests/kernels/moe/modular_kernel_tools/make_feature_matrix.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import copy from enum import Enum from itertools import product import torch from tqdm import tqdm from vllm.config import VllmConfig, set_current_vllm_config from vllm.model_executor.layers.fused_moe.config import FUSED_MOE_UNQUANTIZED_CONFIG from vllm.platforms import current_platform from .common import ( Config, RankTensors, WeightTensors, reference_moe_impl, run_modular_kernel, ) from .mk_objects import ( MK_FUSED_EXPERT_TYPES, MK_MULTI_GPU_PREPARE_FINALIZE_TYPES, MK_QUANT_CONFIGS, ) from .parallel_utils import ProcessGroupInfo, parallel_launch_with_config class Result(Enum): PASS = 1 FAIL = 2 SKIP = 3 def rank_worker( pgi: ProcessGroupInfo, vllm_config: VllmConfig, cpu_group, config: Config, weights: WeightTensors, ): current_platform.seed_everything(pgi.rank) # sanity check from vllm import envs if config.fused_moe_chunk_size is not None: assert config.fused_moe_chunk_size == envs.VLLM_FUSED_MOE_CHUNK_SIZE # get weights to this device weights.to_current_device() Ms = config.Ms assert isinstance(Ms, list) TOPKs = config.topks assert isinstance(TOPKs, list) for m, topk in product(Ms, TOPKs): print(f"Running m={m}, topk={topk} ...") # override m and topk cfgx = copy.deepcopy(config) cfgx.Ms = m cfgx.topks = topk # inputs for rank rank_tensors = RankTensors.make(cfgx, pgi) # modular kernel out mk_out = run_modular_kernel(pgi, vllm_config, cfgx, weights, rank_tensors) with set_current_vllm_config(vllm_config): ref_out = reference_moe_impl(cfgx, weights, rank_tensors) torch.testing.assert_close(ref_out, mk_out, atol=3e-2, rtol=3e-2) def make_feature_matrix(csv_file_path: str): from dataclasses import asdict import pandas as pd def add_to_results( config: Config, success: Result, results_df: pd.DataFrame | None = None ): config_dict = asdict(config) config_dict["prepare_finalize_type"] = config_dict[ "prepare_finalize_type" ].__name__ 
config_dict["fused_experts_type"] = config_dict["fused_experts_type"].__name__ config_dict["per_tensor_act_quant"] = config.is_per_tensor_act_quant quant_config_dict = config_dict["quant_config"] del config_dict["quant_config"] if quant_config_dict is None: quant_config = FUSED_MOE_UNQUANTIZED_CONFIG quant_config_dict = asdict(quant_config) config_dict |= quant_config_dict result_dict = config_dict | {"success": success.name} result_df = pd.DataFrame([result_dict]) if results_df is None: results_df = result_df else: results_df = pd.concat([results_df, result_df], ignore_index=True) return results_df Ms = [64] Ks = [7168] # hidden sizes Ns = [2048] TOPKs = [[4, 1]] Es = [32] DTYPEs = [torch.bfloat16] PF_TYPES = MK_MULTI_GPU_PREPARE_FINALIZE_TYPES FE_TYPES = MK_FUSED_EXPERT_TYPES Q_TYPES = MK_QUANT_CONFIGS combinations = list( product(Ms, Ks, Ns, Es, TOPKs, DTYPEs, PF_TYPES, FE_TYPES, Q_TYPES) ) results_df: pd.DataFrame | None = None for m, k, n, e, topks, dtype, pf_type, experts_type, quant_config in tqdm( combinations ): config = Config( Ms=[m], K=k, N=n, E=e, topks=topks, dtype=dtype, prepare_finalize_type=pf_type, fused_experts_type=experts_type, quant_config=quant_config, world_size=2, fused_moe_chunk_size=None, ) success = None if config.is_valid()[0]: print(f"Running config : {config.describe()} ...") try: weights: WeightTensors = WeightTensors.make(config) vllm_config, env_dict = config.make_env_data() parallel_launch_with_config( config.world_size, rank_worker, vllm_config, env_dict, config, weights, ) success = Result.PASS except Exception as _: success = Result.FAIL else: success = Result.SKIP results_df = add_to_results(config, success, results_df) if results_df is not None: results_df.to_csv(f"{csv_file_path}") if __name__ == "__main__": import argparse from pathlib import Path parser = argparse.ArgumentParser( description=( "Make ModularKernel feature matrix \n" "Example : python3 -m tests.kernels.moe.modular_kernel_tools.make_feature_matrix " # noqa: 
E501 "-f ./feature_matrices/feature_matrix.csv" ) ) parser.add_argument( "-f", "--feature-matrix-csv-file-path", type=str, required=True, help="File name to Generate a .csv file", ) args = parser.parse_args() csv_path = args.feature_matrix_csv_file_path assert csv_path.endswith("csv"), ( f"Need a file path ending with .csv, got {csv_path}" ) assert Path(csv_path).parent.is_dir(), ( f"Cannot find parent directory for {Path(csv_path).parent}" ) make_feature_matrix(args.feature_matrix_csv_file_path)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/moe/modular_kernel_tools/common.py
tests/kernels/moe/modular_kernel_tools/common.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from dataclasses import dataclass from typing import Any import torch import vllm._custom_ops as ops import vllm.model_executor.layers.fused_moe.modular_kernel as mk from tests.kernels.moe.utils import make_test_weights, per_token_cast_to_fp8 from tests.kernels.quantization.nvfp4_utils import ( FLOAT4_E2M1_MAX, FLOAT8_E4M3_MAX, dequantize_nvfp4_to_dtype, ) from tests.kernels.utils import torch_experts from vllm.config import VllmConfig from vllm.distributed import ( get_dp_group, get_pcp_group, get_tensor_model_parallel_world_size, ) from vllm.forward_context import set_forward_context from vllm.model_executor.layers.fused_moe.config import ( FusedMoEConfig, FusedMoEParallelConfig, FusedMoEQuantConfig, ) from vllm.model_executor.layers.fused_moe.fused_moe import fused_topk from vllm.utils.import_utils import has_deep_ep, has_deep_gemm, has_pplx from .mk_objects import ( TestMoEQuantConfig, expert_info, make_fused_experts, make_prepare_finalize, prepare_finalize_info, ) from .parallel_utils import ProcessGroupInfo def _describe_tensor(t: torch.Tensor | None, name: str) -> str: if t is None: return f"{name} : None" else: return f"{name} : {t.shape} {t.dtype} {t.device}" @dataclass class Config: Ms: list[int] | int K: int N: int E: int topks: list[int] | int dtype: torch.dtype quant_config: TestMoEQuantConfig | None prepare_finalize_type: mk.FusedMoEPrepareAndFinalize fused_experts_type: mk.FusedMoEPermuteExpertsUnpermute fused_moe_chunk_size: int | None world_size: int torch_trace_dir_path: str | None = None def __post_init__(self): if self.quant_config is None: self.quant_config = TestMoEQuantConfig(None, False, False, None) def describe(self) -> str: s = "" s += "== Config:\n" s += f" world_size={self.world_size}\n" s += f" PF={self.prepare_finalize_type.__name__}\n" s += f" FE={self.fused_experts_type.__name__}\n" s += f" E={self.E}\n" s += f" Ms={self.Ms}\n" 
s += f" N={self.N}\n" s += f" K={self.K}\n" s += f" topk={self.topks}\n" s += f" dtype={self.dtype}\n" s += f" fused_moe_chunk_size={self.fused_moe_chunk_size}\n" s += " Quant:\n" if self.quant_config is not None: s += f" q_dtype={self.quant_dtype}\n" s += f" q_block_shape={self.quant_block_shape}\n" s += f" q_per_out_ch_quant={self.is_per_out_ch_quant}\n" s += f" q_per_act_token={self.is_per_act_token_quant}\n" else: s += " quant=None\n" return s @property def M(self) -> int: assert isinstance(self.Ms, int) return self.Ms @property def quant_dtype(self) -> torch.dtype | str | None: assert self.quant_config is not None return self.quant_config.quant_dtype @property def is_per_act_token_quant(self) -> bool: assert self.quant_config is not None return self.quant_config.per_act_token_quant @property def is_per_tensor_act_quant(self) -> bool: return not self.is_per_act_token_quant and self.quant_block_shape is None @property def is_per_out_ch_quant(self) -> bool: assert self.quant_config is not None return self.quant_config.per_out_ch_quant @property def quant_block_shape(self) -> list[int] | None: assert self.quant_config is not None return self.quant_config.block_shape @property def topk(self) -> int: assert isinstance(self.topks, int) return self.topks @property def num_local_experts(self) -> int: return self.E // self.world_size def make_env_data(self) -> tuple[VllmConfig, dict[Any, Any]]: """ make env data for vllm launch. 
""" vllm_config = VllmConfig() vllm_config.parallel_config.data_parallel_size = self.world_size vllm_config.parallel_config.enable_expert_parallel = True env_dict = { "VLLM_USE_DEEP_GEMM": str(int(self.needs_deep_gemm())), } backend = self.all2all_backend() vllm_config.parallel_config.all2all_backend = backend if backend is not None: env_dict.update({"VLLM_ALL2ALL_BACKEND": backend}) if self.fused_moe_chunk_size is not None: env_dict.update( {"VLLM_FUSED_MOE_CHUNK_SIZE": str(self.fused_moe_chunk_size)} ) return vllm_config, env_dict def is_fp8_block_quantized(self): return ( self.quant_dtype == torch.float8_e4m3fn and self.quant_block_shape is not None ) def is_batched_prepare_finalize(self): info = prepare_finalize_info(self.prepare_finalize_type) return mk.FusedMoEActivationFormat.BatchedExperts == info.activation_format def is_batched_fused_experts(self): info = expert_info(self.fused_experts_type) return mk.FusedMoEActivationFormat.BatchedExperts == info.activation_format def is_standard_fused_experts(self): info = expert_info(self.fused_experts_type) return mk.FusedMoEActivationFormat.Standard == info.activation_format def fe_supported_types(self): info = expert_info(self.fused_experts_type) return info.supported_dtypes def pf_supported_types(self): info = prepare_finalize_info(self.prepare_finalize_type) return info.supported_dtypes def is_block_quant_supported(self): info = expert_info(self.fused_experts_type) return info.blocked_quantization_support def is_fe_supports_chunking(self): info = expert_info(self.fused_experts_type) return info.supports_chunking def supports_expert_map(self): info = expert_info(self.fused_experts_type) return info.supports_expert_map def supports_apply_weight_on_input(self): info = prepare_finalize_info(self.prepare_finalize_type) return info.supports_apply_weight_on_input def needs_deep_gemm(self): info = expert_info(self.fused_experts_type) return info.needs_deep_gemm def needs_pplx(self): info = 
prepare_finalize_info(self.prepare_finalize_type) return info.backend == "pplx" def needs_deep_ep(self): info = prepare_finalize_info(self.prepare_finalize_type) return ( info.backend == "deepep_high_throughput" or info.backend == "deepep_low_latency" ) def all2all_backend(self): info = prepare_finalize_info(self.prepare_finalize_type) return info.backend def is_valid(self) -> tuple[bool, str | None]: # Check prepare-finalize and fused-experts compatibility if self.is_batched_prepare_finalize(): if not self.is_batched_fused_experts(): return False, "Mismatched format." else: if not self.is_standard_fused_experts(): return False, "Mismatched format." use_chunking = self.fused_moe_chunk_size is not None if use_chunking and not self.is_fe_supports_chunking(): return False, "Chunking not supported." # Check quantization sanity if ( int(self.is_per_act_token_quant) + int(self.is_per_tensor_act_quant) + int(self.quant_block_shape is not None) ) > 1: # invalid quant config return False, f"Bad quant_config {self.quant_config}." # check type support if self.quant_dtype is None: if ( self.dtype not in self.pf_supported_types() or self.dtype not in self.fe_supported_types() ): return False, ( f"Unsupported type {self.dtype} not in " f"{self.pf_supported_types()} and " f"{self.fe_supported_types()}." ) else: if ( self.quant_dtype not in self.pf_supported_types() or self.quant_dtype not in self.fe_supported_types() ): return False, ( f"Unsupported quant type {self.quant_dtype} " f"not in {self.pf_supported_types()} and " f"{self.fe_supported_types()}." ) # Check block quantization support is_block_quantized = self.quant_block_shape is not None if is_block_quantized and self.quant_dtype is None: return False, "No block quantization support." if is_block_quantized and not self.is_block_quant_supported(): return False, "Mismatched block quantization support." 
# deep_gemm only works with block-quantized if self.needs_deep_gemm() and not is_block_quantized: return False, "Needs DeepGEMM but not block quantized." # Check dependencies (turn into asserts?) if self.needs_deep_ep() and not has_deep_ep(): return False, "Needs DeepEP, but DeepEP not available." if self.needs_deep_gemm() and not has_deep_gemm(): return False, "Needs DeepGEMM, but DeepGEMM not available." if self.needs_pplx() and not has_pplx(): # noqa: SIM103 return False, "Needs PPLX, but PPLX not available." return True, None @dataclass class WeightTensors: w1: torch.Tensor w2: torch.Tensor w1_scale: torch.Tensor | None w2_scale: torch.Tensor | None w1_gs: torch.Tensor | None = None w2_gs: torch.Tensor | None = None def describe(self): s = "" s += "== Weight Tensors: \n" s += f" - {_describe_tensor(self.w1, 'w1')} \n" s += f" - {_describe_tensor(self.w2, 'w2')} \n" s += f" - {_describe_tensor(self.w1_scale, 'w1_scale')} \n" s += f" - {_describe_tensor(self.w2_scale, 'w2_scale')} \n" s += f" - {_describe_tensor(self.w1_gs, 'w1_gs')} \n" s += f" - {_describe_tensor(self.w2_gs, 'w2_gs')} \n" return s def is_quantized(self) -> bool: # or w1_scale is not None? 
return ( self.w1.dtype == torch.float8_e4m3fn or self.w1.dtype == torch.uint8 or self.w1.dtype == torch.int8 ) def to_current_device(self): device = torch.cuda.current_device() self.w1 = self.w1.to(device=device) self.w2 = self.w2.to(device=device) if self.w1_scale is not None: self.w1_scale = self.w1_scale.to(device=device) if self.w2_scale is not None: self.w2_scale = self.w2_scale.to(device=device) if self.w1_gs is not None: self.w1_gs = self.w1_gs.to(device=device) if self.w2_gs is not None: self.w2_gs = self.w2_gs.to(device=device) def slice_weights(self, rank: int, num_local_experts: int) -> "WeightTensors": s = rank * num_local_experts e = s + num_local_experts w1 = self.w1[s:e, :, :] w2 = self.w2[s:e, :, :] w1_scale = self.w1_scale[s:e, :, :] if self.w1_scale is not None else None w2_scale = self.w2_scale[s:e, :, :] if self.w2_scale is not None else None w1_gs = self.w1_gs[s:e] if self.w1_gs is not None else None w2_gs = self.w2_gs[s:e] if self.w2_gs is not None else None return WeightTensors(w1, w2, w1_scale, w2_scale, w1_gs, w2_gs) @staticmethod def make(config: Config) -> "WeightTensors": (_, w1, w1_scale, w1_gs), (_, w2, w2_scale, w2_gs) = make_test_weights( e=config.E, n=config.N, k=config.K, in_dtype=config.dtype, quant_dtype=config.quant_dtype, block_shape=config.quant_block_shape, # or config.is_per_out_ch_quant per_out_ch_quant=config.is_per_act_token_quant, ) return WeightTensors( w1=w1, w2=w2, w1_scale=w1_scale, w2_scale=w2_scale, w1_gs=w1_gs, w2_gs=w2_gs ) @dataclass class RankTensors: hidden_states: torch.Tensor hidden_states_scale: torch.Tensor | None topk_weights: torch.Tensor topk_ids: torch.Tensor expert_map: torch.Tensor | None def describe(self): s = "" s += "== Rank Tensors: \n" s += f" - {_describe_tensor(self.hidden_states, 'HS')} \n" s += f" - {_describe_tensor(self.hidden_states_scale, 'HS_scale')} \n" s += f" - {_describe_tensor(self.topk_weights, 'topk_weights')} \n" s += f" - {_describe_tensor(self.topk_ids, 'topk_ids')} \n" s += 
f" - {_describe_tensor(self.expert_map, 'expert_map')} \n" return s @staticmethod def make_hidden_states( config: Config, ) -> tuple[torch.Tensor, torch.Tensor | None]: """ Return hidden_states """ m, k, dtype = (config.M, config.K, config.dtype) a = torch.randn((m, k), device=torch.cuda.current_device(), dtype=dtype) / 15.0 if config.quant_dtype is None: return a, None # We dequant and use that as hidden_states so the tests are stable. # quantizing and dequantizing yield slightly different results # depending on the hardware. Here we, quantize and dequantize # first - so further quantize and dequantize will yield the same # values. if config.is_per_tensor_act_quant: a_q, a_scales = ops.scaled_fp8_quant(a, use_per_token_if_dynamic=False) return a_q.float().mul(a_scales).to(dtype), a_scales if config.is_per_act_token_quant: a_q, a_scales = ops.scaled_fp8_quant(a, use_per_token_if_dynamic=True) return a_q.float().mul(a_scales).to(dtype), None assert config.quant_block_shape is not None block_k = config.quant_block_shape[1] a_q, a_scales = per_token_cast_to_fp8(a, block_size=block_k) return a_q.float().view((-1, block_k)).mul(a_scales.view(-1, 1)).view(m, k).to( dtype ), None @staticmethod def make(config: Config, pgi: ProcessGroupInfo): dtype = config.dtype topk, m, _ = (config.topk, config.M, config.K) hidden_states, hidden_states_scale = RankTensors.make_hidden_states(config) num_local_experts, global_num_experts = (config.num_local_experts, config.E) score = torch.randn((m, global_num_experts), device="cuda", dtype=dtype) topk_weights, topk_ids, _ = fused_topk(hidden_states, score, topk, False) # distribute topk_ids evenly for mi in range(m): topk_ids[mi] = torch.randperm(config.E)[:topk] topk_ids = topk_ids.to(device=torch.cuda.current_device()) expert_map = None if config.world_size > 1 and config.supports_expert_map(): expert_map = torch.full( (global_num_experts,), fill_value=-1, dtype=torch.int32 ) s = pgi.rank * num_local_experts e = s + num_local_experts 
expert_map[s:e] = torch.tensor(list(range(num_local_experts))) expert_map = expert_map.to( device=torch.cuda.current_device(), dtype=torch.int32 ) return RankTensors( hidden_states=hidden_states, hidden_states_scale=hidden_states_scale, topk_weights=topk_weights, topk_ids=topk_ids, expert_map=expert_map, ) def reference_moe_impl( config: Config, weights: WeightTensors, rank_tensors: RankTensors ) -> torch.Tensor: if config.quant_dtype == "nvfp4": quant_blocksize = 16 dtype = config.dtype w1_q = weights.w1 w1_blockscale = weights.w1_scale w1_gs = weights.w1_gs w2_q = weights.w2 w2_blockscale = weights.w2_scale w2_gs = weights.w2_gs a_global_scale = ( (FLOAT8_E4M3_MAX * FLOAT4_E2M1_MAX) / torch.amax(rank_tensors.hidden_states.flatten(), dim=-1) ).to(torch.float32) assert w1_gs is not None assert w2_gs is not None assert w1_blockscale is not None assert w2_blockscale is not None assert w1_blockscale.shape[1] % 128 == 0 assert w1_blockscale.shape[2] % 4 == 0 assert w2_blockscale.shape[1] % 128 == 0 assert w2_blockscale.shape[2] % 4 == 0 a_fp4, a_scale_interleaved = ops.scaled_fp4_quant( rank_tensors.hidden_states, a_global_scale ) a = dequantize_nvfp4_to_dtype( a_fp4, a_scale_interleaved, a_global_scale, dtype=dtype, device=a_fp4.device, block_size=quant_blocksize, ) e = w1_q.shape[0] n = w1_q.shape[1] // 2 k = w2_q.shape[1] w1 = torch.zeros((e, 2 * n, k), device="cuda", dtype=dtype) w2 = torch.zeros((e, k, n), device="cuda", dtype=dtype) for idx in range(0, e): w1[idx] = dequantize_nvfp4_to_dtype( w1_q[idx], w1_blockscale[idx], w1_gs[idx], dtype=dtype, device=w1_q.device, block_size=quant_blocksize, ) w2[idx] = dequantize_nvfp4_to_dtype( w2_q[idx], w2_blockscale[idx], w2_gs[idx], dtype=dtype, device=w2_q.device, block_size=quant_blocksize, ) a_scale = None w1_scale = None w2_scale = None quant_dtype = None per_act_token_quant = False block_shape = None else: a = rank_tensors.hidden_states a_scale = rank_tensors.hidden_states_scale w1 = weights.w1 w1_scale = 
weights.w1_scale w2 = weights.w2 w2_scale = weights.w2_scale quant_dtype = config.quant_dtype per_act_token_quant = config.is_per_act_token_quant block_shape = config.quant_block_shape return torch_experts( a=a, w1=w1, w2=w2, topk_weight=rank_tensors.topk_weights, topk_ids=rank_tensors.topk_ids, global_num_experts=config.E, expert_map=None, w1_scale=w1_scale, w2_scale=w2_scale, a1_scale=a_scale, quant_dtype=quant_dtype, per_act_token_quant=per_act_token_quant, block_shape=block_shape, apply_router_weights_on_input=config.topk == 1 and config.supports_apply_weight_on_input(), ) def _make_gscale(num_experts: int) -> torch.Tensor: return torch.ones( (num_experts,), device=torch.cuda.current_device(), dtype=torch.float32 ) def make_modular_kernel( config: Config, vllm_config: VllmConfig, quant_config: FusedMoEQuantConfig, ) -> mk.FusedMoEModularKernel: def next_power_of_2(x): import math if x == 0: return 1 return 2 ** math.ceil(math.log2(x)) # make moe config moe_parallel_config: FusedMoEParallelConfig = FusedMoEParallelConfig.make( tp_size_=get_tensor_model_parallel_world_size(), pcp_size_=get_pcp_group().world_size, dp_size_=get_dp_group().world_size, vllm_parallel_config=vllm_config.parallel_config, ) moe = FusedMoEConfig( num_experts=config.E, experts_per_token=config.topk, hidden_dim=config.K, num_local_experts=config.num_local_experts, moe_parallel_config=moe_parallel_config, in_dtype=config.dtype, max_num_tokens=next_power_of_2(config.M), ) # make modular kernel prepare_finalize = make_prepare_finalize( config.prepare_finalize_type, config.all2all_backend(), moe, quant_config ) fused_experts = make_fused_experts( config.fused_experts_type, moe, quant_config, prepare_finalize.num_dispatchers(), config.N, ) modular_kernel = mk.FusedMoEModularKernel( prepare_finalize=prepare_finalize, fused_experts=fused_experts, ) return modular_kernel def run_modular_kernel( pgi: ProcessGroupInfo, vllm_config: VllmConfig, config: Config, weights: WeightTensors, rank_tensors: 
RankTensors, ) -> torch.Tensor: assert isinstance(config.Ms, int) assert isinstance(config.topks, int) # weights for rank rank_weights = weights.slice_weights(pgi.rank, config.num_local_experts) if config.quant_dtype == "nvfp4": gscale = _make_gscale(config.num_local_experts) else: gscale = None quant_config = FusedMoEQuantConfig.make( config.quant_dtype, w1_scale=rank_weights.w1_scale, w2_scale=rank_weights.w2_scale, a1_scale=rank_tensors.hidden_states_scale, g1_alphas=(1 / rank_weights.w1_gs) if rank_weights.w1_gs is not None else None, g2_alphas=(1 / rank_weights.w2_gs) if rank_weights.w2_gs is not None else None, a1_gscale=gscale, a2_gscale=gscale, block_shape=config.quant_block_shape, per_act_token_quant=config.is_per_act_token_quant, per_out_ch_quant=config.is_per_out_ch_quant, ) mk = make_modular_kernel(config, vllm_config, quant_config) # impls might update the tensor in place hidden_states = rank_tensors.hidden_states.clone() topk_ids = rank_tensors.topk_ids.to(mk.prepare_finalize.topk_indices_dtype()) mk_kwargs = { "hidden_states": hidden_states, "w1": rank_weights.w1, "w2": rank_weights.w2, "topk_weights": rank_tensors.topk_weights, "topk_ids": topk_ids, "expert_map": rank_tensors.expert_map, "global_num_experts": config.E, "apply_router_weight_on_input": config.topk == 1 and config.supports_apply_weight_on_input(), } num_tokens = rank_tensors.hidden_states.shape[0] num_tokens_across_dp = torch.tensor( [num_tokens] * config.world_size, device="cuda", dtype=torch.int ) with set_forward_context( None, vllm_config, num_tokens=num_tokens, num_tokens_across_dp=num_tokens_across_dp, ): out = mk.forward(**mk_kwargs) return out
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/moe/modular_kernel_tools/parallel_utils.py
tests/kernels/moe/modular_kernel_tools/parallel_utils.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import dataclasses import os import traceback from collections.abc import Callable from typing import Any, Concatenate import torch from torch.multiprocessing import spawn # pyright: ignore[reportPrivateImportUsage] from typing_extensions import ParamSpec from vllm.config import VllmConfig, set_current_vllm_config from vllm.distributed import init_distributed_environment, initialize_model_parallel from vllm.utils.network_utils import get_open_port ## Parallel Processes Utils P = ParamSpec("P") @dataclasses.dataclass class ProcessGroupInfo: world_size: int world_local_size: int rank: int node_rank: int local_rank: int device: torch.device def _set_vllm_config( vllm_config: VllmConfig, world_size: int, rank: int, local_rank: int ): import tempfile temp_file = tempfile.mkstemp()[1] with set_current_vllm_config(vllm_config): init_distributed_environment( world_size=world_size, rank=rank, distributed_init_method=f"file://{temp_file}", local_rank=local_rank, backend="nccl", ) initialize_model_parallel( tensor_model_parallel_size=vllm_config.parallel_config.tensor_parallel_size, pipeline_model_parallel_size=vllm_config.parallel_config.pipeline_parallel_size, ) cpu_group = torch.distributed.new_group(list(range(world_size)), backend="gloo") return cpu_group def _worker_parallel_launch( local_rank: int, world_size: int, world_local_size: int, node_rank: int, init_method: str, worker: Callable[Concatenate[ProcessGroupInfo, VllmConfig | None, Any, P], None], vllm_config: VllmConfig | None, env_dict: dict | None, *args: P.args, **kwargs: P.kwargs, ) -> None: rank = node_rank * world_local_size + local_rank torch.cuda.set_device(local_rank) device = torch.device("cuda", local_rank) torch.distributed.init_process_group( backend="cpu:gloo,cuda:nccl", init_method=init_method, rank=rank, world_size=world_size, device_id=device, ) barrier = torch.tensor([rank], device=device) 
torch.distributed.all_reduce(barrier) if env_dict is not None: os.environ.update(env_dict) cpu_group = None if vllm_config is not None: cpu_group = _set_vllm_config(vllm_config, world_size, rank, local_rank) try: worker( ProcessGroupInfo( world_size=world_size, world_local_size=world_local_size, rank=rank, node_rank=node_rank, local_rank=local_rank, device=device, ), vllm_config, cpu_group, *args, **kwargs, ) except Exception as ex: print(ex) traceback.print_exc() raise finally: torch.distributed.destroy_process_group() def parallel_launch_with_config( world_size: int, worker: Callable[Concatenate[ProcessGroupInfo, VllmConfig, Any, P], None], vllm_config: VllmConfig, env_dict: dict[Any, Any], *args: P.args, **kwargs: P.kwargs, ) -> None: assert not kwargs spawn( _worker_parallel_launch, args=( world_size, world_size, 0, f"tcp://{os.getenv('LOCALHOST', 'localhost')}:{get_open_port()}", worker, vllm_config, env_dict, ) + args, nprocs=world_size, join=True, )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/moe/modular_kernel_tools/profile_modular_kernel.py
tests/kernels/moe/modular_kernel_tools/profile_modular_kernel.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import copy from collections.abc import Callable from itertools import product from typing import Any import torch from vllm.config import VllmConfig from vllm.platforms import current_platform from .common import Config, RankTensors, WeightTensors, make_modular_kernel from .parallel_utils import ProcessGroupInfo, parallel_launch_with_config def do_profile( fn: Callable, fn_kwargs: dict[Any, Any], pgi: ProcessGroupInfo, config: Config, num_warmups: int = 5, ): for _ in range(num_warmups): fn(**fn_kwargs) with torch.profiler.profile( activities=[ torch.profiler.ProfilerActivity.CPU, torch.profiler.ProfilerActivity.CUDA, ], with_stack=True, record_shapes=True, ) as tprof: fn(**fn_kwargs) torch.cuda.synchronize(torch.cuda.current_device()) # TODO (varun): Add a descriptive trace file name tprof.export_chrome_trace( f"{config.torch_trace_dir_path}/m{config.M}_{pgi.rank}_trace.json" ) def profile_modular_kernel( pgi: ProcessGroupInfo, vllm_config: VllmConfig, config: Config, weights: WeightTensors, rank_tensors: RankTensors, ) -> None: assert isinstance(config.Ms, int) assert isinstance(config.topks, int) # weights for rank rank_weights = weights.slice_weights(pgi.rank, config.num_local_experts) # make modular kernel mk = make_modular_kernel(config, vllm_config, weights) mk_kwargs = { "hidden_states": rank_tensors.hidden_states, "w1": rank_weights.w1, "w2": rank_weights.w2, "topk_weights": rank_tensors.topk_weights, "topk_ids": rank_tensors.topk_ids, "expert_map": rank_tensors.expert_map, "w1_scale": rank_weights.w1_scale, "w2_scale": rank_weights.w2_scale, "a1_scale": rank_tensors.hidden_states_scale, "global_num_experts": config.E, "apply_router_weight_on_input": config.topk == 1, } do_profile(mk.forward, mk_kwargs, pgi, config) def rank_worker( pgi: ProcessGroupInfo, vllm_config: VllmConfig, cpu_group, config: Config, weights: WeightTensors, ): 
current_platform.seed_everything(pgi.rank) # sanity check from vllm import envs if config.fused_moe_chunk_size is not None: assert config.fused_moe_chunk_size == envs.VLLM_FUSED_MOE_CHUNK_SIZE # get weights to this device weights.to_current_device() Ms = config.Ms assert isinstance(Ms, list) TOPKs = config.topks assert isinstance(TOPKs, list) for m, topk in product(Ms, TOPKs): print(f"Running m={m}, topk={topk} ...") # override m and topk cfgx = copy.deepcopy(config) cfgx.Ms = m cfgx.topks = topk # inputs for rank rank_tensors = RankTensors.make(cfgx, pgi) profile_modular_kernel(pgi, vllm_config, cfgx, weights, rank_tensors) def run(config: Config): weights: WeightTensors = WeightTensors.make(config) vllm_config, env_dict = config.make_env_data() parallel_launch_with_config( config.world_size, rank_worker, vllm_config, env_dict, config, weights ) if __name__ == "__main__": from .cli_args import make_config, make_config_arg_parser parser = make_config_arg_parser( description=( "Run single prepare-finalize & fused-experts combination test" "Example : python3 -m tests.kernels.moe.modular_kernel_tools.profile_modular_kernel " # noqa: E501 "--pf-type PplxPrepareAndFinalize --experts-type BatchedTritonExperts" ) ) args = parser.parse_args() assert args.torch_trace_dir_path is not None, ( "Please pass in a directory to store torch traces" ) config = make_config(args) run(config)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/moe/modular_kernel_tools/mk_objects.py
tests/kernels/moe/modular_kernel_tools/mk_objects.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from dataclasses import dataclass import torch # Fused experts and PrepareFinalize imports import vllm.model_executor.layers.fused_moe.modular_kernel as mk from vllm.model_executor.layers.fused_moe import TritonExperts from vllm.model_executor.layers.fused_moe.all2all_utils import ( maybe_make_prepare_finalize, ) from vllm.model_executor.layers.fused_moe.batched_deep_gemm_moe import ( BatchedDeepGemmExperts, ) from vllm.model_executor.layers.fused_moe.config import ( FusedMoEConfig, FusedMoEQuantConfig, ) from vllm.model_executor.layers.fused_moe.deep_gemm_moe import DeepGemmExperts from vllm.model_executor.layers.fused_moe.fused_batched_moe import ( BatchedTritonExperts, NaiveBatchedExperts, ) from vllm.model_executor.layers.fused_moe.prepare_finalize import ( MoEPrepareAndFinalizeNoEP, ) from vllm.model_executor.layers.fused_moe.triton_deep_gemm_moe import ( TritonOrDeepGemmExperts, ) from vllm.model_executor.layers.quantization.utils.quant_utils import ( cutlass_fp4_supported, ) from vllm.model_executor.layers.quantization.utils.w8a8_utils import ( cutlass_fp8_supported, ) from vllm.platforms import current_platform from vllm.utils.deep_gemm import is_deep_gemm_supported from vllm.utils.flashinfer import has_flashinfer_cutlass_fused_moe from vllm.utils.import_utils import has_deep_ep, has_deep_gemm, has_pplx @dataclass class TestMoEQuantConfig: quant_dtype: torch.dtype | str | None per_out_ch_quant: bool per_act_token_quant: bool block_shape: list[int] | None @dataclass class PrepareFinalizeInfo: activation_format: mk.FusedMoEActivationFormat supported_dtypes: list[torch.dtype | str] blocked_quantization_support: bool backend: str | None supports_apply_weight_on_input: bool = True @dataclass class ExpertInfo: activation_format: mk.FusedMoEActivationFormat supported_dtypes: list[torch.dtype | str] blocked_quantization_support: bool supports_chunking: bool 
supports_expert_map: bool needs_matching_quant: bool = False needs_deep_gemm: bool = False PREPARE_FINALIZE_INFO: dict[mk.FusedMoEPrepareAndFinalize, PrepareFinalizeInfo] = {} EXPERT_INFO: dict[mk.FusedMoEPermuteExpertsUnpermute, ExpertInfo] = {} MK_ALL_PREPARE_FINALIZE_TYPES: list[mk.FusedMoEPrepareAndFinalize] = [] MK_MULTI_GPU_PREPARE_FINALIZE_TYPES: list[mk.FusedMoEPrepareAndFinalize] = [] MK_SINGLE_GPU_PREPARE_FINALIZE_TYPES: list[mk.FusedMoEPrepareAndFinalize] = [] MK_FUSED_EXPERT_TYPES: list[mk.FusedMoEPermuteExpertsUnpermute] = [] standard_format = mk.FusedMoEActivationFormat.Standard batched_format = mk.FusedMoEActivationFormat.BatchedExperts common_float_types: list[torch.dtype | str] = [ torch.float8_e4m3fn, torch.bfloat16, torch.float16, torch.float32, ] common_float_and_int_types = common_float_types + [torch.int8] nvfp4_types = ["nvfp4"] fp8_types = [torch.float8_e4m3fn] def register_prepare_and_finalize( kind, activation_format: mk.FusedMoEActivationFormat, supported_dtypes: list[torch.dtype | str], blocked_quantization_support: bool, backend: str | None, force_multigpu: bool = False, supports_apply_weight_on_input: bool = True, ): global PREPARE_FINALIZE_INFO global MK_ALL_PREPARE_FINALIZE_TYPES global MK_MULTI_GPU_PREPARE_FINALIZE_TYPES global MK_SINGLE_GPU_PREPARE_FINALIZE_TYPES assert kind not in PREPARE_FINALIZE_INFO PREPARE_FINALIZE_INFO[kind] = PrepareFinalizeInfo( activation_format, supported_dtypes, blocked_quantization_support, backend, supports_apply_weight_on_input, ) MK_ALL_PREPARE_FINALIZE_TYPES.append(kind) if backend is not None or force_multigpu: MK_MULTI_GPU_PREPARE_FINALIZE_TYPES.append(kind) else: MK_SINGLE_GPU_PREPARE_FINALIZE_TYPES.append(kind) def register_experts( kind, activation_format: mk.FusedMoEActivationFormat, supported_dtypes: list[torch.dtype | str], blocked_quantization_support: bool, supports_chunking: bool, supports_expert_map: bool, needs_matching_quant: bool = False, needs_deep_gemm: bool = False, ): global 
EXPERT_INFO global MK_FUSED_EXPERT_TYPES assert kind not in EXPERT_INFO EXPERT_INFO[kind] = ExpertInfo( activation_format, supported_dtypes, blocked_quantization_support, supports_chunking, supports_expert_map, needs_matching_quant, needs_deep_gemm, ) MK_FUSED_EXPERT_TYPES.append(kind) def prepare_finalize_info(kind) -> PrepareFinalizeInfo: info = PREPARE_FINALIZE_INFO.get(kind) assert info is not None return info def expert_info(kind) -> ExpertInfo: info = EXPERT_INFO.get(kind) assert info is not None return info register_prepare_and_finalize( MoEPrepareAndFinalizeNoEP, standard_format, common_float_types, blocked_quantization_support=True, backend=None, ) register_experts( BatchedTritonExperts, batched_format, common_float_types, blocked_quantization_support=True, supports_chunking=False, supports_expert_map=False, needs_matching_quant=True, ) register_experts( TritonExperts, standard_format, common_float_and_int_types, blocked_quantization_support=True, supports_chunking=True, supports_expert_map=True, needs_matching_quant=True, ) register_experts( NaiveBatchedExperts, batched_format, common_float_and_int_types, blocked_quantization_support=True, supports_chunking=False, supports_expert_map=True, ) # Disable on blackwell for now if has_deep_ep() and not current_platform.has_device_capability(100): from vllm.model_executor.layers.fused_moe.deepep_ht_prepare_finalize import ( DeepEPHTPrepareAndFinalize, ) from vllm.model_executor.layers.fused_moe.deepep_ll_prepare_finalize import ( DeepEPLLPrepareAndFinalize, ) register_prepare_and_finalize( DeepEPHTPrepareAndFinalize, standard_format, common_float_types, blocked_quantization_support=True, backend="deepep_high_throughput", ) register_prepare_and_finalize( DeepEPLLPrepareAndFinalize, batched_format, common_float_types, blocked_quantization_support=True, backend="deepep_low_latency", ) if has_pplx(): from vllm.model_executor.layers.fused_moe.pplx_prepare_finalize import ( PplxPrepareAndFinalize, ) 
register_prepare_and_finalize( PplxPrepareAndFinalize, batched_format, common_float_and_int_types, blocked_quantization_support=True, backend="pplx", ) if has_flashinfer_cutlass_fused_moe() and current_platform.has_device_capability(100): from vllm.model_executor.layers.fused_moe.flashinfer_cutlass_moe import ( FlashInferExperts, ) from vllm.model_executor.layers.fused_moe.flashinfer_cutlass_prepare_finalize import ( # noqa: E501 FlashInferCutlassMoEPrepareAndFinalize, create_flashinfer_prepare_finalize, ) register_prepare_and_finalize( FlashInferCutlassMoEPrepareAndFinalize, standard_format, nvfp4_types + fp8_types, blocked_quantization_support=True, backend=None, force_multigpu=True, supports_apply_weight_on_input=False, ) register_experts( FlashInferExperts, standard_format, nvfp4_types + fp8_types, blocked_quantization_support=True, supports_chunking=True, # Note: this is a hack to get it to run for now supports_expert_map=True, ) else: FlashInferCutlassMoEPrepareAndFinalize = None if has_deep_gemm() and is_deep_gemm_supported(): register_experts( BatchedDeepGemmExperts, batched_format, fp8_types, blocked_quantization_support=True, supports_chunking=False, supports_expert_map=False, needs_matching_quant=False, needs_deep_gemm=True, ) register_experts( DeepGemmExperts, standard_format, fp8_types, blocked_quantization_support=True, supports_chunking=True, supports_expert_map=True, needs_matching_quant=False, needs_deep_gemm=True, ) register_experts( TritonOrDeepGemmExperts, standard_format, common_float_and_int_types, blocked_quantization_support=True, supports_chunking=True, supports_expert_map=True, needs_matching_quant=True, needs_deep_gemm=True, ) if cutlass_fp8_supported(): from vllm.model_executor.layers.fused_moe import ( CutlassBatchedExpertsFp8, CutlassExpertsFp8, ) register_experts( CutlassExpertsFp8, standard_format, fp8_types, blocked_quantization_support=False, supports_chunking=True, supports_expert_map=False, ) register_experts( 
CutlassBatchedExpertsFp8, batched_format, fp8_types, blocked_quantization_support=False, supports_chunking=False, supports_expert_map=False, ) if cutlass_fp4_supported(): from vllm.model_executor.layers.fused_moe.cutlass_moe import CutlassExpertsFp4 register_experts( CutlassExpertsFp4, standard_format, nvfp4_types, blocked_quantization_support=True, supports_chunking=True, supports_expert_map=False, ) MK_QUANT_CONFIGS: list[TestMoEQuantConfig | None] = [ None, # per-channel / per-column weights and per-tensor activations TestMoEQuantConfig( quant_dtype=torch.float8_e4m3fn, per_out_ch_quant=True, per_act_token_quant=False, block_shape=None, ), # per-channel / per-column weights and per-token activations TestMoEQuantConfig( quant_dtype=torch.float8_e4m3fn, per_out_ch_quant=True, per_act_token_quant=True, block_shape=None, ), # per-tensor weights and per-tensor activations TestMoEQuantConfig( quant_dtype=torch.float8_e4m3fn, per_out_ch_quant=False, per_act_token_quant=False, block_shape=None, ), # per-tensor weights and per-token activations TestMoEQuantConfig( quant_dtype=torch.float8_e4m3fn, per_out_ch_quant=False, per_act_token_quant=True, block_shape=None, ), # block-quantized weights and 128 block per-token activations TestMoEQuantConfig( quant_dtype=torch.float8_e4m3fn, per_out_ch_quant=False, per_act_token_quant=False, block_shape=[128, 128], ), # TODO (varun) : Should we test the following combinations ? 
# block-quantized weights and per-token activations # block-quantized weights and per-tensor activations ] if cutlass_fp4_supported() or has_flashinfer_cutlass_fused_moe(): MK_QUANT_CONFIGS += [ TestMoEQuantConfig( quant_dtype="nvfp4", per_out_ch_quant=False, per_act_token_quant=False, block_shape=None, ), ] def make_prepare_finalize( prepare_finalize_type: mk.FusedMoEPrepareAndFinalize, backend: str | None, moe: FusedMoEConfig, quant_config: FusedMoEQuantConfig, ) -> mk.FusedMoEPrepareAndFinalize: if backend != "naive" and backend is not None: prepare_finalize = maybe_make_prepare_finalize(moe, quant_config) assert prepare_finalize is not None return prepare_finalize elif prepare_finalize_type == FlashInferCutlassMoEPrepareAndFinalize: return create_flashinfer_prepare_finalize( use_dp=moe.moe_parallel_config.dp_size > 1 ) else: return MoEPrepareAndFinalizeNoEP() def _slice(rank: int, num_local_experts: int, t: torch.Tensor) -> torch.Tensor: s = rank * num_local_experts e = s + num_local_experts return t[s:e] def make_cutlass_strides( e: int, n: int, k: int, ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: ab_strides1 = torch.full((e,), k, device="cuda", dtype=torch.int64) ab_strides2 = torch.full((e,), n, device="cuda", dtype=torch.int64) c_strides1 = torch.full((e,), 2 * n, device="cuda", dtype=torch.int64) c_strides2 = torch.full((e,), k, device="cuda", dtype=torch.int64) return ab_strides1, ab_strides2, c_strides1, c_strides2 def make_fused_experts( fused_experts_type: mk.FusedMoEPermuteExpertsUnpermute, moe: FusedMoEConfig, quant_config: FusedMoEQuantConfig, num_dispatchers: int, N: int, ) -> mk.FusedMoEPermuteExpertsUnpermute: batch_kwargs = { "max_num_tokens": moe.max_num_tokens, "num_dispatchers": num_dispatchers, } quant_kwargs = { "quant_config": quant_config, } deepgemm_kwargs = {"allow_deep_gemm": has_deep_gemm()} torch.set_printoptions(threshold=0, edgeitems=0, linewidth=10000) if fused_experts_type == BatchedDeepGemmExperts: kwargs 
= batch_kwargs | quant_kwargs print(f"Making BatchedDeepGemmExperts {kwargs} ...") experts = BatchedDeepGemmExperts(**kwargs) elif fused_experts_type == BatchedTritonExperts: kwargs = batch_kwargs | quant_kwargs print(f"Making BatchedTritonExperts {kwargs} ...") experts = BatchedTritonExperts(**kwargs) elif fused_experts_type == DeepGemmExperts: print(f"Making DeepGemmExperts {quant_config} ...") experts = DeepGemmExperts(quant_config) elif fused_experts_type == TritonExperts: kwargs = quant_kwargs print(f"Making TritonExperts {kwargs} ...") experts = TritonExperts(**kwargs) elif fused_experts_type == TritonOrDeepGemmExperts: kwargs = quant_kwargs | deepgemm_kwargs print(f"Making TritonOrDeepGemmExperts {kwargs} ...") experts = TritonOrDeepGemmExperts(**kwargs) elif fused_experts_type == NaiveBatchedExperts: kwargs = batch_kwargs | quant_kwargs print(f"Making NaiveBatchedExperts {kwargs} ...") experts = NaiveBatchedExperts(**kwargs) elif fused_experts_type == CutlassExpertsFp8: strides = make_cutlass_strides(moe.num_experts, N, moe.hidden_dim) kwargs = { "out_dtype": moe.in_dtype, "ab_strides1": strides[0], "ab_strides2": strides[1], "c_strides1": strides[2], "c_strides2": strides[3], } | quant_kwargs print(f"Making CutlassExpertsFp8 {kwargs} ...") experts = CutlassExpertsFp8(**kwargs) elif fused_experts_type == CutlassBatchedExpertsFp8: strides = make_cutlass_strides(moe.num_experts, N, moe.hidden_dim) kwargs = { "max_experts_per_worker": moe.num_local_experts, "num_dispatchers": num_dispatchers, "out_dtype": moe.in_dtype, "ab_strides1": strides[0], "ab_strides2": strides[1], "c_strides1": strides[2], "c_strides2": strides[3], } | quant_kwargs print(f"Making CutlassBatchedExpertsFp8 {kwargs} ...") experts = CutlassBatchedExpertsFp8(**kwargs) elif fused_experts_type == CutlassExpertsFp4: kwargs = { "max_experts_per_worker": moe.num_local_experts, "num_dispatchers": num_dispatchers, "out_dtype": moe.in_dtype, } | quant_kwargs print(f"Making CutlassExpertsFp4 
{kwargs} ...") experts = CutlassExpertsFp4(**kwargs) elif fused_experts_type == FlashInferExperts: kwargs = { "out_dtype": moe.in_dtype, "ep_rank": moe.ep_rank, "ep_size": moe.ep_size, "tp_rank": moe.tp_rank, "tp_size": moe.tp_size, } | quant_kwargs print(f"Making FlashInferExperts {kwargs} ...") experts = FlashInferExperts(**kwargs) else: raise RuntimeError(f"Unknown fused experts type: {fused_experts_type}") torch.set_printoptions(threshold=1000, edgeitems=5, linewidth=80) return experts
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/moe/modular_kernel_tools/__init__.py
tests/kernels/moe/modular_kernel_tools/__init__.py
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/kernels/moe/modular_kernel_tools/cli_args.py
tests/kernels/moe/modular_kernel_tools/cli_args.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import argparse import torch import vllm.model_executor.layers.fused_moe.modular_kernel as mk from vllm.model_executor.layers.fused_moe.config import FusedMoEQuantConfig from .common import Config from .mk_objects import ( MK_ALL_PREPARE_FINALIZE_TYPES, MK_FUSED_EXPERT_TYPES, MK_SINGLE_GPU_PREPARE_FINALIZE_TYPES, ) def make_config_arg_parser(description: str): def to_pf_class_type(s: str) -> mk.FusedMoEPrepareAndFinalize: for pf in MK_ALL_PREPARE_FINALIZE_TYPES: if pf.__name__ == s: return pf raise ValueError(f"Cannot find a PrepareFinalize type that matches {s}") def to_experts_class_type(s: str) -> mk.FusedMoEPermuteExpertsUnpermute: for fe in MK_FUSED_EXPERT_TYPES: if fe.__name__ == s: return fe raise ValueError(f"Cannot find a FusedExperts type that matches {s}") def to_quant_torch_dtype(s: str) -> torch.dtype: if s == "torch.float8_e4m3fn": return torch.float8_e4m3fn raise ValueError(f"Unsupported quant type {s}") parser = argparse.ArgumentParser(description=description) parser.add_argument( "--world-size", type=int, default=2, help="Number of ranks that participate in all2all", ) parser.add_argument( "--pf-type", type=to_pf_class_type, required=True, help=( "Choose a PrepareFinalize Type : " f"{[x.__name__ for x in MK_ALL_PREPARE_FINALIZE_TYPES]}" ), ) parser.add_argument( "--experts-type", type=to_experts_class_type, required=True, help=( f"Choose a FusedExpert type : {[x.__name__ for x in MK_FUSED_EXPERT_TYPES]}" ), ) parser.add_argument( "-m", nargs="+", type=int, default=[64], help="num tokens per rank", ) parser.add_argument( "-k", type=int, default=7168, help="hidden-size", ) parser.add_argument( "-n", type=int, default=1024, help="N dimension of the first fused-moe matmul", ) parser.add_argument( "--num-experts", type=int, default=32, help="Global num experts" ) parser.add_argument("--topk", nargs="+", type=int, default=[4, 1], help="num topk") 
parser.add_argument( "--fused-moe-chunk-size", type=int, help="Fused moe chunk size used for the non-batched fused experts impl.", ) # Quant args parser.add_argument( "--quant-dtype", type=to_quant_torch_dtype, help="Quant datatype" ) parser.add_argument( "--per-token-quantized-activations", action="store_true", help=("The input activations must be per-token quantized"), ) parser.add_argument( "--per-channel-quantized-weights", action="store_true", help="The weights must be per-channel quantized.", ) parser.add_argument( "--block-shape", nargs="+", type=int, help="Quantization block shape" ) # Torch trace profile generation args parser.add_argument( "--torch-trace-dir-path", type=str, default=None, help="Get torch trace for single execution", ) return parser def _validate_args(args: argparse.Namespace): if args.quant_dtype is not None: assert args.quant_dtype == torch.float8_e4m3fn if args.block_shape is not None: assert len(args.block_shape) == 2, ( f"block shape must have 2 elements. got {args.block_shape}" ) if args.experts_type in MK_SINGLE_GPU_PREPARE_FINALIZE_TYPES: assert args.world_size == 1, "Single GPU objects need world size set to 1" if args.torch_trace_dir_path is not None: from pathlib import Path assert Path(args.torch_trace_dir_path).is_dir(), ( f"Please create {args.torch_trace_dir_path}" ) def make_config(args: argparse.Namespace) -> Config: _validate_args(args) quant_config = None if args.quant_dtype is not None: quant_config = FusedMoEQuantConfig( quant_dtype=args.quant_dtype, per_act_token_quant=args.per_token_quantized_activations, per_out_ch_quant=args.per_channel_quantized_weights, block_shape=args.block_shape, ) return Config( Ms=args.m, K=args.k, N=args.n, E=args.num_experts, topks=args.topk, dtype=torch.bfloat16, # hard-code quant_config=quant_config, prepare_finalize_type=args.pf_type, fused_experts_type=args.experts_type, fused_moe_chunk_size=args.fused_moe_chunk_size, world_size=args.world_size, 
torch_trace_dir_path=args.torch_trace_dir_path, )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/.buildkite/check-wheel-size.py
.buildkite/check-wheel-size.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import os import sys import zipfile # Read the VLLM_MAX_SIZE_MB environment variable, defaulting to 500 MiB # Note that we have 800 MiB quota, please use it wisely. # See https://github.com/pypi/support/issues/6326 . # Please also sync the value with the one in Dockerfile. VLLM_MAX_SIZE_MB = int(os.environ.get("VLLM_MAX_SIZE_MB", 500)) def print_top_10_largest_files(zip_file): """Print the top 10 largest files in the given zip file.""" with zipfile.ZipFile(zip_file, "r") as z: file_sizes = [(f, z.getinfo(f).file_size) for f in z.namelist()] file_sizes.sort(key=lambda x: x[1], reverse=True) for f, size in file_sizes[:10]: print(f"{f}: {size / (1024 * 1024):.2f} MBs uncompressed.") def check_wheel_size(directory): """Check the size of .whl files in the given directory.""" for root, _, files in os.walk(directory): for file_name in files: if file_name.endswith(".whl"): wheel_path = os.path.join(root, file_name) wheel_size_mb = os.path.getsize(wheel_path) / (1024 * 1024) if wheel_size_mb > VLLM_MAX_SIZE_MB: print( f"Not allowed: Wheel {wheel_path} is larger " f"({wheel_size_mb:.2f} MB) than the limit " f"({VLLM_MAX_SIZE_MB} MB)." ) print_top_10_largest_files(wheel_path) return 1 else: print( f"Wheel {wheel_path} is within the allowed size " f"({wheel_size_mb:.2f} MB)." ) return 0 if __name__ == "__main__": if len(sys.argv) < 2: print("Usage: python check-wheel-size.py <directory>") sys.exit(1) directory = sys.argv[1] sys.exit(check_wheel_size(directory))
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/.buildkite/scripts/generate-nightly-index.py
.buildkite/scripts/generate-nightly-index.py
#!/usr/bin/env python3 # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project # do not complain about line length (for docstring) # ruff: noqa: E501 import argparse import json import sys from dataclasses import asdict, dataclass from datetime import datetime from pathlib import Path from typing import Any from urllib.parse import quote import regex as re if not sys.version_info >= (3, 12): raise RuntimeError("This script requires Python 3.12 or higher.") INDEX_HTML_TEMPLATE = """<!DOCTYPE html> <html> <!-- {comment} --> <meta name="pypi:repository-version" content="1.0"> <body> {items} </body> </html> """ @dataclass class WheelFileInfo: package_name: str version: str build_tag: str | None python_tag: str abi_tag: str platform_tag: str variant: str | None filename: str def parse_from_filename(file: str) -> WheelFileInfo: """ Parse wheel file name to extract metadata. The format of wheel names: {package_name}-{version}(-{build_tag})?-{python_tag}-{abi_tag}-{platform_tag}.whl All versions could contain a variant like '+cu129' or '.cpu' or `.rocm` (or not). Example: vllm-0.11.0-cp38-abi3-manylinux1_x86_64.whl vllm-0.10.2rc2+cu129-cp38-abi3-manylinux2014_aarch64.whl vllm-0.11.1rc8.dev14+gaa384b3c0-cp38-abi3-manylinux2014_aarch64.whl vllm-0.11.1rc8.dev14+gaa384b3c0.cu130-cp38-abi3-manylinux1_x86_64.whl """ wheel_file_re = re.compile( r"^(?P<package_name>.+)-(?P<version>[^-]+?)(-(?P<build_tag>[^-]+))?-(?P<python_tag>[^-]+)-(?P<abi_tag>[^-]+)-(?P<platform_tag>[^-]+)\.whl$" ) match = wheel_file_re.match(file) if not match: raise ValueError(f"Invalid wheel file name: {file}") package_name = match.group("package_name") version = match.group("version") build_tag = match.group("build_tag") python_tag = match.group("python_tag") abi_tag = match.group("abi_tag") platform_tag = match.group("platform_tag") # extract variant from version variant = None if "dev" in version: ver_after_dev = version.split("dev")[-1] if "." 
in ver_after_dev: variant = ver_after_dev.split(".")[-1] version = version.removesuffix("." + variant) else: if "+" in version: version, variant = version.split("+") return WheelFileInfo( package_name=package_name, version=version, build_tag=build_tag, python_tag=python_tag, abi_tag=abi_tag, platform_tag=platform_tag, variant=variant, filename=file, ) def generate_project_list(subdir_names: list[str], comment: str = "") -> str: """ Generate project list HTML content linking to each project & variant sub-directory. """ href_tags = [] for name in sorted(subdir_names): name = name.strip("/").strip(".") href_tags.append(f' <a href="{name}/">{name}/</a><br/>') return INDEX_HTML_TEMPLATE.format(items="\n".join(href_tags), comment=comment) def generate_package_index_and_metadata( wheel_files: list[WheelFileInfo], wheel_base_dir: Path, index_base_dir: Path, comment: str = "", ) -> tuple[str, str]: """ Generate package index HTML content for a specific package, linking to actual wheel files. """ href_tags = [] metadata = [] for file in sorted(wheel_files, key=lambda x: x.filename): relative_path = ( wheel_base_dir.relative_to(index_base_dir, walk_up=True) / file.filename ) # handle with '+' in URL, and avoid double-encoding '/' and already-encoded '%2B' # NOTE: this is AWS S3 specific behavior! file_path_quoted = quote(relative_path.as_posix(), safe=":%/") href_tags.append(f' <a href="{file_path_quoted}">{file.filename}</a><br/>') file_meta = asdict(file) file_meta["path"] = file_path_quoted metadata.append(file_meta) index_str = INDEX_HTML_TEMPLATE.format(items="\n".join(href_tags), comment=comment) metadata_str = json.dumps(metadata, indent=2) return index_str, metadata_str def generate_index_and_metadata( whl_files: list[str], wheel_base_dir: Path, index_base_dir: Path, default_variant: str | None = None, alias_to_default: str | None = None, comment: str = "", ): """ Generate index for all wheel files. 
Args: whl_files (list[str]): List of wheel files (must be directly under `wheel_base_dir`). wheel_base_dir (Path): Base directory for wheel files. index_base_dir (Path): Base directory to store index files. default_variant (str | None): The default variant name, if any. alias_to_default (str | None): Alias variant name for the default variant, if any. comment (str | None): Optional comment to include in the generated HTML files. First, parse all wheel files to extract metadata. We need to collect all wheel files for each variant, and generate an index for it (in a sub-directory). The index for the default variant (if any) is generated in the root index directory. If `default_variant` is provided, all wheels must have variant suffixes, and the default variant index is purely a copy of the corresponding variant index, with only the links adjusted. Otherwise, all wheels without variant suffixes are treated as the default variant. If `alias_to_default` is provided, an additional alias sub-directory is created, it has the same content as the default variant index, but the links are adjusted accordingly. Index directory structure: index_base_dir/ (hosted at wheels.vllm.ai/{nightly,$commit,$version}/) index.html # project list, linking to "vllm/" and other packages, and all variant sub-directories vllm/ index.html # package index, pointing to actual files in wheel_base_dir (relative path) metadata.json # machine-readable metadata for all wheels in this package cpu/ # cpu variant sub-directory index.html vllm/ index.html metadata.json cu129/ # cu129 is actually the alias to default variant index.html vllm/ index.html metadata.json cu130/ # cu130 variant sub-directory index.html vllm/ index.html metadata.json ... 
metadata.json stores a dump of all wheel files' metadata in a machine-readable format: [ { "package_name": "vllm", "version": "0.10.2rc2", "build_tag": null, "python_tag": "cp38", "abi_tag": "abi3", "platform_tag": "manylinux2014_aarch64", "variant": "cu129", "filename": "vllm-0.10.2rc2+cu129-cp38-abi3-manylinux2014_aarch64.whl", "path": "../vllm-0.10.2rc2%2Bcu129-cp38-abi3-manylinux2014_aarch64.whl" # to be concatenated with the directory URL and URL-encoded }, ... ] """ parsed_files = [parse_from_filename(f) for f in whl_files] if not parsed_files: print("No wheel files found, skipping index generation.") return # Group by variant variant_to_files: dict[str, list[WheelFileInfo]] = {} for file in parsed_files: variant = file.variant or "default" if variant not in variant_to_files: variant_to_files[variant] = [] variant_to_files[variant].append(file) print(f"Found variants: {list(variant_to_files.keys())}") # sanity check for default variant if default_variant: if "default" in variant_to_files: raise ValueError( "All wheel files must have variant suffixes when `default_variant` is specified." ) if default_variant not in variant_to_files: raise ValueError( f"Default variant '{default_variant}' not found among wheel files." ) if alias_to_default: if "default" not in variant_to_files: # e.g. only some wheels are uploaded to S3 currently print( "[WARN] Alias to default variant specified, but no default variant found." ) elif alias_to_default in variant_to_files: raise ValueError( f"Alias variant name '{alias_to_default}' already exists among wheel files." 
) else: variant_to_files[alias_to_default] = variant_to_files["default"].copy() print(f"Alias variant '{alias_to_default}' created for default variant.") # Generate comment in HTML header comment_str = f" ({comment})" if comment else "" comment_tmpl = f"Generated on {datetime.now().isoformat()}{comment_str}" # Generate index for each variant subdir_names = set() for variant, files in variant_to_files.items(): if variant == "default": variant_dir = index_base_dir else: variant_dir = index_base_dir / variant subdir_names.add(variant) variant_dir.mkdir(parents=True, exist_ok=True) # gather all package names in this variant packages = set(f.package_name for f in files) if variant == "default": # these packages should also appear in the "project list" # generate after all variants are processed subdir_names = subdir_names.union(packages) else: # generate project list for this variant directly project_list_str = generate_project_list(sorted(packages), comment_tmpl) with open(variant_dir / "index.html", "w") as f: f.write(project_list_str) for package in packages: # filter files belonging to this package only package_files = [f for f in files if f.package_name == package] package_dir = variant_dir / package package_dir.mkdir(parents=True, exist_ok=True) index_str, metadata_str = generate_package_index_and_metadata( package_files, wheel_base_dir, package_dir, comment ) with open(package_dir / "index.html", "w") as f: f.write(index_str) with open(package_dir / "metadata.json", "w") as f: f.write(metadata_str) # Generate top-level project list index project_list_str = generate_project_list(sorted(subdir_names), comment_tmpl) with open(index_base_dir / "index.html", "w") as f: f.write(project_list_str) if __name__ == "__main__": """ Arguments: --version <version> : version string for the current build (e.g., commit hash) --wheel-dir <wheel_directory> : directory containing wheel files (default to be same as `version`) --current-objects <path_to_json> : path to JSON file 
containing current S3 objects listing in this version directory --output-dir <output_directory> : directory to store generated index files --alias-to-default <alias_variant_name> : (optional) alias variant name for the default variant --comment <comment_string> : (optional) comment string to include in generated HTML files """ parser = argparse.ArgumentParser( description="Process nightly build wheel files to generate indices." ) parser.add_argument( "--version", type=str, required=True, help="Version string for the current build (e.g., commit hash)", ) parser.add_argument( "--current-objects", type=str, required=True, help="Path to JSON file containing current S3 objects listing in this version directory", ) parser.add_argument( "--output-dir", type=str, required=True, help="Directory to store generated index files", ) parser.add_argument( "--wheel-dir", type=str, default=None, help="Directory containing wheel files (default to be same as `version`)", ) parser.add_argument( "--alias-to-default", type=str, default=None, help="Alias variant name for the default variant", ) parser.add_argument( "--comment", type=str, default="", help="Optional comment string to include in generated HTML files", ) args = parser.parse_args() version = args.version if "/" in version or "\\" in version: raise ValueError("Version string must not contain slashes.") current_objects_path = Path(args.current_objects) output_dir = Path(args.output_dir) if not output_dir.exists(): output_dir.mkdir(parents=True, exist_ok=True) # Read current objects JSON with open(current_objects_path) as f: current_objects: dict[str, list[dict[str, Any]]] = json.load(f) # current_objects looks like from list_objects_v2 S3 API: """ "Contents": [ { "Key": "e2f56c309d2a28899c68975a7e104502d56deb8f/vllm-0.11.2.dev363+ge2f56c309-cp38-abi3-manylinux1_x86_64.whl", "LastModified": "2025-11-28T14:00:32+00:00", "ETag": "\"37a38339c7cdb61ca737021b968075df-52\"", "ChecksumAlgorithm": [ "CRC64NVME" ], "ChecksumType": 
"FULL_OBJECT", "Size": 435649349, "StorageClass": "STANDARD" }, ... ] """ # Extract wheel file keys wheel_files = [] for item in current_objects.get("Contents", []): key: str = item["Key"] if key.endswith(".whl"): wheel_files.append(key.split("/")[-1]) # only the filename is used print(f"Found {len(wheel_files)} wheel files for version {version}: {wheel_files}") # keep only "official" files for a non-nightly version (specified by cli args) PY_VERSION_RE = re.compile(r"^\d+\.\d+\.\d+([a-zA-Z0-9.+-]*)?$") if PY_VERSION_RE.match(version): # upload-wheels.sh ensures no "dev" is in args.version wheel_files = list( filter(lambda x: version in x and "dev" not in x, wheel_files) ) print(f"Non-nightly version detected, wheel files used: {wheel_files}") else: print("Nightly version detected, keeping all wheel files.") # Generate index and metadata, assuming wheels and indices are stored as: # s3://vllm-wheels/{wheel_dir}/<wheel files> # s3://vllm-wheels/<anything>/<index files> wheel_dir = args.wheel_dir or version wheel_base_dir = Path(output_dir).parent / wheel_dir.strip().rstrip("/") index_base_dir = Path(output_dir) generate_index_and_metadata( whl_files=wheel_files, wheel_base_dir=wheel_base_dir, index_base_dir=index_base_dir, default_variant=None, alias_to_default=args.alias_to_default, comment=args.comment.strip(), ) print(f"Successfully generated index and metadata in {output_dir}")
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/.buildkite/performance-benchmarks/scripts/compare-json-results.py
.buildkite/performance-benchmarks/scripts/compare-json-results.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from __future__ import annotations import argparse import html as _html import json import os from dataclasses import dataclass from importlib import util import pandas as pd pd.options.display.float_format = "{:.2f}".format plotly_found = util.find_spec("plotly.express") is not None DEFAULT_INFO_COLS = [ "Model", "Dataset Name", "Input Len", "Output Len", # "TP Size", # "PP Size", "# of max concurrency.", "qps", ] # Safety net: if any DataFrame leaks into to_html(), keep precision at 2. pd.set_option("display.precision", 2) pd.set_option("display.float_format", lambda x: f"{x:.2f}") # ----------------------------- # Core data compare # ----------------------------- def compare_data_columns( files: list[str], name_column: str, data_column: str, info_cols: list[str], drop_column: str, debug: bool = False, ): """ Align concatenation by keys derived from info_cols instead of row order. - Pick one canonical key list: subset of info_cols present in ALL files. - For each file: set index to those keys, aggregate duplicates (mean for metric, first for names). - Concat along axis=1 (indexes align), then reset_index so callers can group by columns. - If --debug, add a <file_label>_name column per file. """ print("\ncompare_data_column:", data_column) frames = [] raw_data_cols: list[str] = [] compare_frames = [] cols_per_file: list[set] = [] for f in files: try: df_tmp = pd.read_json(f, orient="records") except Exception as err: raise ValueError(f"Failed to read {f}") from err cols_per_file.append(set(df_tmp.columns)) key_cols = [c for c in info_cols if all(c in cset for cset in cols_per_file)] if not key_cols: key_cols = [c for c in info_cols if c in list(cols_per_file[0])] if not key_cols: raise ValueError( "No common key columns found from info_cols across the input files." 
) meta_added = False for file in files: df = pd.read_json(file, orient="records") if drop_column in df.columns: df = df.dropna(subset=[drop_column], ignore_index=True) for c in ( "Input Len", "Output Len", "TP Size", "PP Size", "# of max concurrency.", "qps", ): if c in df.columns: df[c] = pd.to_numeric(df[c], errors="coerce") for c in key_cols: if c not in df.columns: df[c] = pd.NA df_idx = df.set_index(key_cols, drop=False) meta = df_idx[key_cols] if not meta.index.is_unique: meta = meta.groupby(level=key_cols, dropna=False).first() file_label = "/".join(file.split("/")[:-1]) or os.path.basename(file) s = df_idx[data_column] if not s.index.is_unique: s = s.groupby(level=key_cols, dropna=False).mean() s.name = file_label if not meta_added: frames.append(meta) meta_added = True if debug and name_column in df_idx.columns: name_s = df_idx[name_column] if not name_s.index.is_unique: name_s = name_s.groupby(level=key_cols, dropna=False).first() name_s.name = f"{file_label}_name" frames.append(name_s) frames.append(s) raw_data_cols.append(file_label) compare_frames.append(s) if len(compare_frames) >= 2: base = compare_frames[0] current = compare_frames[-1] if "P99" in data_column or "Median" in data_column: ratio = base / current else: ratio = current / base ratio = ratio.mask(base == 0) ratio.name = f"Ratio 1 vs {len(compare_frames)}" frames.append(ratio) concat_df = pd.concat(frames, axis=1).reset_index(drop=True) front = [c for c in info_cols if c in concat_df.columns] rest = [c for c in concat_df.columns if c not in front] concat_df = concat_df[front + rest] print(raw_data_cols) return concat_df, raw_data_cols # ----------------------------- # Split helper # ----------------------------- def split_json_by_tp_pp( input_file: str = "benchmark_results.json", output_root: str = "." 
) -> list[str]: with open(input_file, encoding="utf-8") as f: data = json.load(f) if isinstance(data, dict): for key in ("results", "serving_results", "benchmarks", "data"): if isinstance(data.get(key), list): data = data[key] break df = pd.DataFrame(data) name_col = next( (c for c in ["Test name", "test_name", "Test Name"] if c in df.columns), None ) if name_col: df = df[ df[name_col].astype(str).str.contains(r"serving", case=False, na=False) ].copy() rename_map = { "tp_size": "TP Size", "tensor_parallel_size": "TP Size", "pp_size": "PP Size", "pipeline_parallel_size": "PP Size", } df.rename( columns={k: v for k, v in rename_map.items() if k in df.columns}, inplace=True ) if "TP Size" not in df.columns: df["TP Size"] = 1 if "PP Size" not in df.columns: df["PP Size"] = 1 df["TP Size"] = pd.to_numeric(df["TP Size"], errors="coerce").fillna(1).astype(int) df["PP Size"] = pd.to_numeric(df["PP Size"], errors="coerce").fillna(1).astype(int) saved_paths: list[str] = [] for (tp, pp), group_df in df.groupby(["TP Size", "PP Size"], dropna=False): folder_name = os.path.join(output_root, f"tp{int(tp)}_pp{int(pp)}") os.makedirs(folder_name, exist_ok=True) filepath = os.path.join(folder_name, "benchmark_results.json") group_df.to_json(filepath, orient="records", indent=2, force_ascii=False) print(f"Saved: {filepath}") saved_paths.append(filepath) return saved_paths # ----------------------------- # Styling helpers # ----------------------------- def _find_concurrency_col(df: pd.DataFrame) -> str: for c in [ "# of max concurrency.", "# of max concurrency", "Max Concurrency", "max_concurrency", "Concurrency", ]: if c in df.columns: return c for c in df.columns: if df[c].dtype.kind in "iu" and df[c].nunique() > 1 and df[c].min() >= 1: return c return "# of max concurrency." 
def _highlight_threshold( df: pd.DataFrame, threshold: float ) -> pd.io.formats.style.Styler: conc_col = _find_concurrency_col(df) key_cols = [ c for c in ["Model", "Dataset Name", "Input Len", "Output Len", conc_col] if c in df.columns ] conf_cols = [ c for c in df.columns if c not in key_cols and not str(c).startswith("Ratio") ] conf_cols = [c for c in conf_cols if pd.api.types.is_numeric_dtype(df[c])] return df.style.map( lambda v: "background-color:#e6ffe6;font-weight:bold;" if pd.notna(v) and v <= threshold else "", subset=conf_cols, ) def highlight_ratio_columns(styler: pd.io.formats.style.Styler): ratio_cols = [c for c in styler.data.columns if "ratio" in str(c).lower()] if not ratio_cols: return styler styler = styler.apply( lambda _: ["background-color: #fff3b0"] * len(styler.data), subset=ratio_cols, axis=0, ) styler = styler.set_table_styles( [ { "selector": f"th.col_heading.level0.col{i}", "props": [("background-color", "#fff3b0")], } for i, col in enumerate(styler.data.columns) if col in ratio_cols ], overwrite=False, ) return styler def _apply_two_decimals( styler: pd.io.formats.style.Styler, ) -> pd.io.formats.style.Styler: df = styler.data num_cols = df.select_dtypes("number").columns if len(num_cols) == 0: return styler return styler.format({c: "{:.2f}" for c in num_cols}, na_rep="") # ----------------------------- # Valid max concurrency summary helpers # ----------------------------- def _config_value_columns(df: pd.DataFrame, conc_col: str) -> list[str]: key_cols = [ c for c in ["Model", "Dataset Name", "Input Len", "Output Len"] if c in df.columns ] exclude = set(key_cols + [conc_col, "qps", "QPS"]) cols: list[str] = [] for c in df.columns: if c in exclude: continue lc = str(c).lower() if lc.startswith("ratio"): continue if lc.endswith("_name") or lc == "test name" or lc == "test_name": continue if pd.api.types.is_numeric_dtype(df[c]): cols.append(c) return cols def _max_concurrency_ok( df: pd.DataFrame, conc_col: str, cfg_col: str, threshold: 
float ): if df is None or conc_col not in df.columns or cfg_col not in df.columns: return pd.NA d = df[[conc_col, cfg_col]].copy() d[conc_col] = pd.to_numeric(d[conc_col], errors="coerce") d[cfg_col] = pd.to_numeric(d[cfg_col], errors="coerce") d = d.dropna(subset=[conc_col, cfg_col]) if d.empty: return pd.NA ok = d[d[cfg_col] <= threshold] if ok.empty: return pd.NA return ok[conc_col].max() def _value_at_concurrency(df: pd.DataFrame, conc_col: str, cfg_col: str, conc_value): if ( df is None or conc_col not in df.columns or cfg_col not in df.columns or pd.isna(conc_value) ): return pd.NA d = df[[conc_col, cfg_col]].copy() d[conc_col] = pd.to_numeric(d[conc_col], errors="coerce") d[cfg_col] = pd.to_numeric(d[cfg_col], errors="coerce") conc_value = pd.to_numeric(conc_value, errors="coerce") if pd.isna(conc_value): return pd.NA hit = d[d[conc_col] == conc_value] if hit.empty: return pd.NA return hit[cfg_col].iloc[0] def build_valid_max_concurrency_summary_html( tput_group_df: pd.DataFrame | None, ttft_group_df: pd.DataFrame | None, tpot_group_df: pd.DataFrame | None, conc_col: str, args, ) -> str: if ttft_group_df is None and tpot_group_df is None: return "" ttft_cols = ( _config_value_columns(ttft_group_df, conc_col) if ttft_group_df is not None else [] ) tpot_cols = ( _config_value_columns(tpot_group_df, conc_col) if tpot_group_df is not None else [] ) tput_cols = ( _config_value_columns(tput_group_df, conc_col) if tput_group_df is not None else [] ) if ttft_group_df is not None and tpot_group_df is not None: cfg_cols = [c for c in ttft_cols if c in tpot_cols] if tput_group_df is not None: cfg_cols = [c for c in cfg_cols if c in tput_cols] or cfg_cols else: cfg_cols = ttft_cols or tpot_cols if not cfg_cols: cfg_cols = sorted(set(ttft_cols) | set(tpot_cols) | set(tput_cols), key=str) rows = [] for cfg in cfg_cols: ttft_max = ( _max_concurrency_ok(ttft_group_df, conc_col, cfg, args.ttft_max_ms) if ttft_group_df is not None else pd.NA ) tpot_max = ( 
_max_concurrency_ok(tpot_group_df, conc_col, cfg, args.tpot_max_ms) if tpot_group_df is not None else pd.NA ) both = ( pd.NA if (pd.isna(ttft_max) or pd.isna(tpot_max)) else min(ttft_max, tpot_max) ) tput_at_both = ( _value_at_concurrency(tput_group_df, conc_col, cfg, both) if tput_group_df is not None else pd.NA ) ttft_at_both = ( _value_at_concurrency(ttft_group_df, conc_col, cfg, both) if ttft_group_df is not None else pd.NA ) tpot_at_both = ( _value_at_concurrency(tpot_group_df, conc_col, cfg, both) if tpot_group_df is not None else pd.NA ) rows.append( { "Configuration": cfg, f"Max {conc_col} (TTFT ≤ {args.ttft_max_ms:g} ms)": ttft_max, f"Max {conc_col} (TPOT ≤ {args.tpot_max_ms:g} ms)": tpot_max, f"Max {conc_col} (Both)": both, "Output Tput @ Both (tok/s)": tput_at_both, "TTFT @ Both (ms)": ttft_at_both, "TPOT @ Both (ms)": tpot_at_both, } ) summary_df = pd.DataFrame(rows) # --- Coerce numeric columns so Styler doesn't miss them due to object dtype --- for c in summary_df.columns: if c == "Configuration": continue summary_df[c] = pd.to_numeric(summary_df[c], errors="coerce") both_col = f"Max {conc_col} (Both)" # --- Strict 2-decimal formatting for ALL non-Configuration columns --- formatters = {} for c in summary_df.columns: if c == "Configuration": continue # default argument binds per-column formatter correctly formatters[c] = lambda v: "" if pd.isna(v) else f"{float(v):.2f}" styler = summary_df.style.format(formatters) def _green(v): return "background-color:#e6ffe6;font-weight:bold;" if pd.notna(v) else "" if both_col in summary_df.columns: styler = styler.map(_green, subset=[both_col]) title = ( '<div style="font-size: 1.15em; font-weight: 700; margin: 12px 0 6px 0;">' "Valid Max Concurrency Summary" "</div>\n" ) return title + styler.to_html(table_attributes='border="1" class="dataframe"') # ----------------------------- # Plot helper # ----------------------------- def _add_limit_line(fig, y_value: float, label: str): fig.add_hline( y=y_value, 
line_dash="dash", line_color="red" if "ttft" in label.lower() else "blue", annotation_text=f"{label}: {y_value} ms", annotation_position="top left", ) if plotly_found: import plotly.graph_objects as go fig.add_trace( go.Scatter( x=[None], y=[None], mode="lines", line=dict( dash="dash", color="red" if "ttft" in label.lower() else "blue", ), name=label, ) ) # ----------------------------- # Refactored main + group-first report # ----------------------------- @dataclass(frozen=True) class MetricPlan: data_cols: list[str] drop_column: str def build_parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser() parser.add_argument( "-f", "--file", action="append", type=str, help="input file name" ) parser.add_argument( "--debug", action="store_true", help="show all information for debugging" ) parser.add_argument( "--plot", action=argparse.BooleanOptionalAction, default=True, help="plot perf diagrams or not --no-plot --plot", ) parser.add_argument( "-x", "--xaxis", type=str, default="# of max concurrency.", help="column name to use as X Axis in comparison graph", ) parser.add_argument( "-l", "--latency", type=str, default="p99", help="take median|p99 for latency like TTFT/TPOT", ) parser.add_argument( "--ttft-max-ms", type=float, default=3000.0, help="Reference limit for TTFT plots (ms)", ) parser.add_argument( "--tpot-max-ms", type=float, default=100.0, help="Reference limit for TPOT plots (ms)", ) return parser def choose_metrics(latency: str) -> MetricPlan: latency = (latency or "").lower() drop_column = "P99" if "median" in latency: return MetricPlan( data_cols=["Output Tput (tok/s)", "Median TTFT (ms)", "Median"], drop_column=drop_column, ) return MetricPlan( data_cols=["Output Tput (tok/s)", "P99 TTFT (ms)", "P99"], drop_column=drop_column, ) def prepare_input_files(args, info_cols: list[str]) -> tuple[list[str], list[str]]: if not args.file: raise ValueError("No input files provided. 
Use -f/--file.") if len(args.file) == 1: files = split_json_by_tp_pp(args.file[0], output_root="splits") info_cols = [c for c in info_cols if c not in ("TP Size", "PP Size")] else: files = args.file return files, info_cols def get_y_axis_col(info_cols: list[str], xaxis: str) -> str: y_axis_index = info_cols.index(xaxis) if xaxis in info_cols else 6 return info_cols[y_axis_index] def get_group_cols(output_df: pd.DataFrame, info_cols: list[str]) -> list[str]: filtered_info_cols = info_cols[:4] group_cols = [c for c in filtered_info_cols if c in output_df.columns] if not group_cols: raise ValueError( f"No valid group-by columns. Expected subset: {filtered_info_cols}, " f"but DataFrame has: {list(output_df.columns)}" ) return group_cols def normalize_group_key(name): return name if isinstance(name, tuple) else (name,) def group_filename(name, prefix: str = "perf_comparison_") -> str: name_vals = normalize_group_key(name) safe = ",".join(map(str, name_vals)).replace(",", "_").replace("/", "-") return f"{prefix}{safe}.html" def build_group_suffix(group_cols: list[str], name) -> str: name_vals = normalize_group_key(name) return " , ".join(f"{col} : [ {val} ] " for col, val in zip(group_cols, name_vals)) def render_metric_table_html( display_group: pd.DataFrame, metric_label: str, group_suffix: str, args, ) -> str: title = ( f'<div style="font-size: 1.25em; font-weight: 600; margin: 12px 0;">' f"{_html.escape(metric_label)}" f" — {_html.escape(group_suffix)}" f"</div>\n" ) metric_name = metric_label.lower() if "ttft" in metric_name: styler = _highlight_threshold(display_group, args.ttft_max_ms) elif ("tpot" in metric_name) or ("median" in metric_name) or ("p99" in metric_name): styler = _highlight_threshold(display_group, args.tpot_max_ms) else: styler = display_group.style styler = _apply_two_decimals(styler) styler = highlight_ratio_columns(styler) return title + styler.to_html(table_attributes='border="1" class="dataframe"') def maybe_write_plot( main_fh, sub_fh, 
group_df: pd.DataFrame, raw_data_cols: list[str], metric_label: str, y_axis_col: str, args, ): if not (args.plot and plotly_found): return import plotly.express as px df = group_df[raw_data_cols].sort_values(by=y_axis_col) df_melted = df.melt( id_vars=y_axis_col, var_name="Configuration", value_name=metric_label, ) fig = px.line( df_melted, x=y_axis_col, y=metric_label, color="Configuration", title=f"{metric_label} vs {y_axis_col}", markers=True, ) # Ensure plot hover + y tick labels are also 2 decimals. fig.update_traces(hovertemplate="%{y:.2f}<extra></extra>") fig.update_yaxes(tickformat=".2f") metric_name = metric_label.lower() if "ttft" in metric_name: _add_limit_line(fig, args.ttft_max_ms, "TTFT limit") elif ("tpot" in metric_name) or ("median" in metric_name) or ("p99" in metric_name): _add_limit_line(fig, args.tpot_max_ms, "TPOT limit") html = fig.to_html(full_html=True, include_plotlyjs="cdn") main_fh.write(html) sub_fh.write(html) def build_group_keys( df: pd.DataFrame, group_cols: list[str], sort_cols: list[str] | None = None ): if sort_cols: df = df.sort_values(by=sort_cols) gb = df.groupby(group_cols, dropna=False) return [k for k, _ in gb] def write_report_group_first( files: list[str], info_cols: list[str], plan: MetricPlan, args ): name_column = "Test name" y_axis_col = get_y_axis_col(info_cols, args.xaxis) print("comparing : " + ", ".join(files)) metric_cache: dict[str, tuple[pd.DataFrame, list[str]]] = {} group_cols_canonical: list[str] | None = None for metric_label in plan.data_cols: output_df, raw_data_cols = compare_data_columns( files, name_column, metric_label, info_cols, plan.drop_column, debug=args.debug, ) raw_data_cols = list(raw_data_cols) raw_data_cols.insert(0, y_axis_col) group_cols = get_group_cols(output_df, info_cols) if group_cols_canonical is None: group_cols_canonical = group_cols else: group_cols_canonical = [c for c in group_cols_canonical if c in group_cols] metric_cache[metric_label] = ( output_df.sort_values(by=args.xaxis), 
raw_data_cols, ) if not group_cols_canonical: raise ValueError("No canonical group columns found across metrics.") first_metric = plan.data_cols[0] first_df_sorted, _ = metric_cache[first_metric] group_keys = build_group_keys( first_df_sorted, group_cols_canonical, sort_cols=[args.xaxis] ) metric_groupbys = { metric_label: df.groupby(group_cols_canonical, dropna=False) for metric_label, (df, _) in metric_cache.items() } with open("perf_comparison.html", "w", encoding="utf-8") as main_fh: main_fh.write('<meta charset="utf-8">\n') for gkey in group_keys: gkey_tuple = normalize_group_key(gkey) suffix = build_group_suffix(group_cols_canonical, gkey_tuple) sub_path = group_filename(gkey_tuple) group_header = ( '<div style="font-size: 1.4em; font-weight: 700; ' 'margin: 18px 0 10px 0;">' f"{_html.escape(suffix)}" "</div>\n" ) main_fh.write(group_header) with open(sub_path, "w", encoding="utf-8") as sub_fh: sub_fh.write('<meta charset="utf-8">\n') sub_fh.write(group_header) tput_group_df = None ttft_group_df = None tpot_group_df = None conc_col = args.xaxis for metric_label in plan.data_cols: gb = metric_groupbys[metric_label] df_sorted, raw_data_cols = metric_cache[metric_label] try: group_df = gb.get_group(gkey) except KeyError: missing = ( '<div style="font-size: 1.1em; font-weight: 600; ' 'margin: 10px 0;">' f"{_html.escape(metric_label)} — missing for this group" "</div>\n" ) main_fh.write(missing) sub_fh.write(missing) continue if conc_col not in group_df.columns: conc_col = _find_concurrency_col(group_df) mn = metric_label.lower().strip() if "tok/s" in mn: tput_group_df = group_df elif "ttft" in mn: ttft_group_df = group_df elif mn in ("p99", "median") or "tpot" in mn: tpot_group_df = group_df display_group = group_df.drop( columns=group_cols_canonical, errors="ignore" ) html = render_metric_table_html( display_group, metric_label, suffix, args ) main_fh.write(html) sub_fh.write(html) maybe_write_plot( main_fh, sub_fh, group_df=group_df, 
raw_data_cols=raw_data_cols, metric_label=metric_label, y_axis_col=y_axis_col, args=args, ) summary_html = build_valid_max_concurrency_summary_html( tput_group_df=tput_group_df, ttft_group_df=ttft_group_df, tpot_group_df=tpot_group_df, conc_col=conc_col, args=args, ) if summary_html: main_fh.write(summary_html) sub_fh.write(summary_html) def main(): args = build_parser().parse_args() info_cols = list(DEFAULT_INFO_COLS) plan = choose_metrics(args.latency) files, info_cols = prepare_input_files(args, info_cols) write_report_group_first(files, info_cols, plan, args) if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/.buildkite/performance-benchmarks/scripts/convert-results-json-to-markdown.py
.buildkite/performance-benchmarks/scripts/convert-results-json-to-markdown.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import argparse import json import os import shlex from importlib import util from pathlib import Path from typing import Any import pandas as pd import psutil import regex as re from tabulate import tabulate # latency results and the keys that will be printed into markdown latency_results = [] latency_column_mapping = { "test_name": "Test name", "gpu_type": "GPU", "avg_latency": "Mean latency (ms)", # "P10": "P10 (s)", # "P25": "P25 (s)", "P50": "Median latency (ms)", # "P75": "P75 (s)", # "P90": "P90 (s)", "P99": "P99 latency (ms)", } # throughput tests and the keys that will be printed into markdown throughput_results = [] throughput_results_column_mapping = { "test_name": "Test name", "gpu_type": "GPU", "num_requests": "# of req.", "total_num_tokens": "Total # of tokens", "elapsed_time": "Elapsed time (s)", "requests_per_second": "Tput (req/s)", "tokens_per_second": "Tput (tok/s)", } # serving results and the keys that will be printed into markdown serving_results = [] serving_column_mapping = { "test_name": "Test name", "model_id": "Model", "dataset_name": "Dataset Name", "input_len": "Input Len", "output_len": "Output Len", "tp_size": "TP Size", "pp_size": "PP Size", "dtype": "dtype", "gpu_type": "GPU", "completed": "# of req.", "qps": "qps", "max_concurrency": "# of max concurrency.", "request_throughput": "Tput (req/s)", "total_token_throughput": "Total Token Tput (tok/s)", "output_throughput": "Output Tput (tok/s)", # "total_input_tokens": "Total input tokens", # "total_output_tokens": "Total output tokens", "mean_ttft_ms": "Mean TTFT (ms)", "median_ttft_ms": "Median TTFT (ms)", "p99_ttft_ms": "P99 TTFT (ms)", "std_ttft_ms": "STD TTFT (ms)", "mean_tpot_ms": "Mean TPOT (ms)", "median_tpot_ms": "Median", "p99_tpot_ms": "P99", "std_tpot_ms": "STD TPOT (ms)", "mean_itl_ms": "Mean ITL (ms)", "median_itl_ms": "Median ITL (ms)", "p99_itl_ms": "P99 ITL (ms)", 
} def read_markdown(file): if os.path.exists(file): with open(file) as f: return f.read() + "\n" else: return f"{file} not found.\n" def results_to_json(latency, throughput, serving): return json.dumps( { "latency": latency.to_dict(), "throughput": throughput.to_dict(), "serving": serving.to_dict(), } ) def get_size_with_unit(bytes, suffix="B"): """ Scale bytes to its proper format e.g: 1253656 => '1.20MB' 1253656678 => '1.17GB' """ factor = 1024 for unit in ["", "K", "M", "G", "T", "P"]: if bytes < factor: return f"{bytes:.2f}{unit}{suffix}" bytes /= factor def _coerce(val: str) -> Any: """Best-effort type coercion from string to Python types.""" low = val.lower() if low == "null": return None if low == "true": return True if low == "false": return False # integers if re.fullmatch(r"[+-]?\d+", val): try: return int(val) except ValueError: pass # floats (keep 'inf'/'-inf'/'nan' as strings) if re.fullmatch(r"[+-]?\d*\.\d+", val): try: return float(val) except ValueError: pass return val def parse_client_command(cmd: str) -> dict[str, Any]: """Parse the client_command shell string into {executable, script, args}.""" toks = shlex.split(cmd) if len(toks) < 2: raise ValueError("client_command must include an executable and a script") executable, script = toks[0], toks[1] args: dict[str, Any] = {} i = 2 while i < len(toks): t = toks[i] if t.startswith("--"): # --key=value or --key (value) or boolean flag if "=" in t: key, val = t.split("=", 1) if key == "--metadata": md = {} if val: if "=" in val: k, v = val.split("=", 1) md[k] = _coerce(v) else: md[val] = True args[key] = md else: args[key] = _coerce(val) i += 1 continue key = t # Special: consume metadata k=v pairs until next --flag if key == "--metadata": i += 1 md = {} while i < len(toks) and not toks[i].startswith("--"): pair = toks[i] if "=" in pair: k, v = pair.split("=", 1) md[k] = _coerce(v) else: md[pair] = True i += 1 args[key] = md continue # Standard: check if next token is a value (not a flag) if i + 1 < 
len(toks) and not toks[i + 1].startswith("--"): args[key] = _coerce(toks[i + 1]) i += 2 else: # lone flag -> True args[key] = True i += 1 else: # unexpected positional; skip i += 1 return {"executable": executable, "script": script, "args": args} if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "-r", "--result", type=str, default="results", help="Folder name for benchmark output results.", ) args = parser.parse_args() results_folder = Path(args.result) if not results_folder.exists(): raise FileNotFoundError(f"results folder does not exist: {results_folder}") # collect results for test_file in results_folder.glob("*.json"): with open(test_file) as f: raw_result = json.loads(f.read()) if "serving" in str(test_file): # this result is generated via `vllm bench serve` command # attach the benchmarking command to raw_result try: with open(test_file.with_suffix(".commands")) as f: command = json.loads(f.read()) except OSError as e: print(e) continue # Parse Server Command Arg out: dict[str, Any] = { "server_command": parse_client_command(command["server_command"]) } parse_args = [ "--tensor-parallel-size", "--pipeline-parallel-size", "--dtype", ] col_mapping = ["tp_size", "pp_size", "dtype"] for index, arg in enumerate(parse_args): if arg in out["server_command"]["args"]: raw_result.update( {col_mapping[index]: out["server_command"]["args"][arg]} ) # Parse Client Command Arg out: dict[str, Any] = { "client_command": parse_client_command(command["client_command"]) } parse_args = [ "--dataset-name", "--random-input-len", "--random-output-len", "--request-rate", ] col_mapping = ["dataset_name", "input_len", "output_len", "qps"] for index, arg in enumerate(parse_args): if arg in out["client_command"]["args"]: raw_result.update( {col_mapping[index]: out["client_command"]["args"][arg]} ) # Add Server, Client command raw_result.update(command) # update the test name of this result raw_result.update({"test_name": test_file.stem}) # add the result 
to raw_result serving_results.append(raw_result) continue elif "latency" in f.name: # this result is generated via `vllm bench latency` command # attach the benchmarking command to raw_result try: with open(test_file.with_suffix(".commands")) as f: command = json.loads(f.read()) except OSError as e: print(e) continue raw_result.update(command) # update the test name of this result raw_result.update({"test_name": test_file.stem}) # get different percentiles for perc in [10, 25, 50, 75, 90, 99]: # Multiply 1000 to convert the time unit from s to ms raw_result.update( {f"P{perc}": 1000 * raw_result["percentiles"][str(perc)]} ) raw_result["avg_latency"] = raw_result["avg_latency"] * 1000 # add the result to raw_result latency_results.append(raw_result) continue elif "throughput" in f.name: # this result is generated via `vllm bench throughput` command # attach the benchmarking command to raw_result try: with open(test_file.with_suffix(".commands")) as f: command = json.loads(f.read()) except OSError as e: print(e) continue raw_result.update(command) # update the test name of this result raw_result.update({"test_name": test_file.stem}) # add the result to raw_result throughput_results.append(raw_result) continue print(f"Skipping {test_file}") latency_results = pd.DataFrame.from_dict(latency_results) serving_results = pd.DataFrame.from_dict(serving_results) throughput_results = pd.DataFrame.from_dict(throughput_results) svmem = psutil.virtual_memory() platform_data = { "Physical cores": [psutil.cpu_count(logical=False)], "Total cores": [psutil.cpu_count(logical=True)], "Total Memory": [get_size_with_unit(svmem.total)], } if util.find_spec("numa") is not None: from numa import info platform_data["Total NUMA nodes"] = [info.get_num_configured_nodes()] if util.find_spec("cpuinfo") is not None: from cpuinfo import get_cpu_info platform_data["CPU Brand"] = [get_cpu_info()["brand_raw"]] platform_results = pd.DataFrame.from_dict( platform_data, orient="index", 
columns=["Platform Info"] ) raw_results_json = results_to_json( latency_results, throughput_results, serving_results ) # remapping the key, for visualization purpose if not latency_results.empty: latency_results = latency_results[list(latency_column_mapping.keys())].rename( columns=latency_column_mapping ) if not serving_results.empty: valid_columns = [ col for col in serving_column_mapping if col in serving_results.columns ] serving_results = serving_results[valid_columns].rename( columns=serving_column_mapping ) if not throughput_results.empty: throughput_results = throughput_results[ list(throughput_results_column_mapping.keys()) ].rename(columns=throughput_results_column_mapping) processed_results_json = results_to_json( latency_results, throughput_results, serving_results ) for df in [latency_results, serving_results, throughput_results]: if df.empty: continue # Sort all dataframes by their respective "Test name" columns df.sort_values(by="Test name", inplace=True) # The GPUs sometimes come in format of "GPUTYPE\nGPUTYPE\n...", # we want to turn it into "8xGPUTYPE" df["GPU"] = df["GPU"].apply( lambda x: "{}x{}".format(len(x.split("\n")), x.split("\n")[0]) ) # get markdown tables latency_md_table = tabulate( latency_results, headers="keys", tablefmt="pipe", showindex=False ) serving_md_table = tabulate( serving_results, headers="keys", tablefmt="pipe", showindex=False ) throughput_md_table = tabulate( throughput_results, headers="keys", tablefmt="pipe", showindex=False ) platform_md_table = tabulate( platform_results, headers="keys", tablefmt="pipe", showindex=True ) # document the result md_file = "benchmark_results.md" json_file = "benchmark_results.json" with open(results_folder / md_file, "w") as f: results = read_markdown( "../.buildkite/performance-benchmarks/" + "performance-benchmarks-descriptions.md" ) results = results.format( latency_tests_markdown_table=latency_md_table, throughput_tests_markdown_table=throughput_md_table, 
serving_tests_markdown_table=serving_md_table, platform_markdown_table=platform_md_table, benchmarking_results_in_json_string=processed_results_json, ) f.write(results) # document benchmarking results in json with open(results_folder / json_file, "w") as f: results = ( latency_results.to_dict(orient="records") + throughput_results.to_dict(orient="records") + serving_results.to_dict(orient="records") ) f.write(json.dumps(results))
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/.buildkite/lm-eval-harness/conftest.py
.buildkite/lm-eval-harness/conftest.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from pathlib import Path import pytest def pytest_addoption(parser): parser.addoption( "--config-list-file", action="store", help="Path to the file listing model config YAMLs (one per line)", ) parser.addoption( "--tp-size", action="store", default="1", help="Tensor parallel size to use for evaluation", ) @pytest.fixture(scope="session") def config_list_file(pytestconfig, config_dir): rel_path = pytestconfig.getoption("--config-list-file") return config_dir / rel_path @pytest.fixture(scope="session") def tp_size(pytestconfig): return pytestconfig.getoption("--tp-size") def pytest_generate_tests(metafunc): if "config_filename" in metafunc.fixturenames: rel_path = metafunc.config.getoption("--config-list-file") config_list_file = Path(rel_path).resolve() config_dir = config_list_file.parent with open(config_list_file, encoding="utf-8") as f: configs = [ config_dir / line.strip() for line in f if line.strip() and not line.startswith("#") ] metafunc.parametrize("config_filename", configs)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/.buildkite/lm-eval-harness/test_lm_eval_correctness.py
.buildkite/lm-eval-harness/test_lm_eval_correctness.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ LM eval harness on model to compare vs HF baseline computed offline. Configs are found in configs/$MODEL.yaml pytest -s -v test_lm_eval_correctness.py \ --config-list-file=configs/models-small.txt \ --tp-size=1 """ import os from contextlib import contextmanager import lm_eval import numpy as np import yaml DEFAULT_RTOL = 0.08 @contextmanager def scoped_env_vars(new_env: dict[str, str]): if not new_env: # Fast path: nothing to do yield return old_values = {} new_keys = [] try: for key, value in new_env.items(): if key in os.environ: old_values[key] = os.environ[key] else: new_keys.append(key) os.environ[key] = str(value) yield finally: # Restore / clean up for key, value in old_values.items(): os.environ[key] = value for key in new_keys: os.environ.pop(key, None) def launch_lm_eval(eval_config, tp_size): trust_remote_code = eval_config.get("trust_remote_code", False) max_model_len = eval_config.get("max_model_len", 4096) batch_size = eval_config.get("batch_size", "auto") backend = eval_config.get("backend", "vllm") enforce_eager = eval_config.get("enforce_eager", "true") kv_cache_dtype = eval_config.get("kv_cache_dtype", "auto") model_args = ( f"pretrained={eval_config['model_name']}," f"tensor_parallel_size={tp_size}," f"enforce_eager={enforce_eager}," f"kv_cache_dtype={kv_cache_dtype}," f"add_bos_token=true," f"trust_remote_code={trust_remote_code}," f"max_model_len={max_model_len}," ) env_vars = eval_config.get("env_vars", None) with scoped_env_vars(env_vars): results = lm_eval.simple_evaluate( model=backend, model_args=model_args, tasks=[task["name"] for task in eval_config["tasks"]], num_fewshot=eval_config["num_fewshot"], limit=eval_config["limit"], # TODO(yeq): using chat template w/ fewshot_as_multiturn is supposed help # text models. 
however, this is regressing measured strict-match for # existing text models in CI, so only apply it for mm, or explicitly set apply_chat_template=eval_config.get( "apply_chat_template", backend == "vllm-vlm" ), fewshot_as_multiturn=eval_config.get("fewshot_as_multiturn", False), # Forward decoding and early-stop controls (e.g., max_gen_toks, until=...) gen_kwargs=eval_config.get("gen_kwargs"), batch_size=batch_size, ) return results def test_lm_eval_correctness_param(config_filename, tp_size): eval_config = yaml.safe_load(config_filename.read_text(encoding="utf-8")) results = launch_lm_eval(eval_config, tp_size) rtol = eval_config.get("rtol", DEFAULT_RTOL) success = True for task in eval_config["tasks"]: for metric in task["metrics"]: ground_truth = metric["value"] measured_value = results["results"][task["name"]][metric["name"]] print( f"{task['name']} | {metric['name']}: " f"ground_truth={ground_truth:.3f} | " f"measured={measured_value:.3f} | rtol={rtol}" ) success = success and np.isclose(ground_truth, measured_value, rtol=rtol) assert success
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/docs/mkdocs/hooks/generate_metrics.py
docs/mkdocs/hooks/generate_metrics.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import ast import logging from pathlib import Path from typing import Literal logger = logging.getLogger("mkdocs") ROOT_DIR = Path(__file__).parent.parent.parent.parent DOCS_DIR = ROOT_DIR / "docs" GENERATED_METRICS_DIR = DOCS_DIR / "generated" / "metrics" # Files to scan for metric definitions - each will generate a separate table METRIC_SOURCE_FILES = [ {"path": "vllm/v1/metrics/loggers.py", "output": "general.inc.md"}, { "path": "vllm/v1/spec_decode/metrics.py", "output": "spec_decode.inc.md", }, { "path": "vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py", "output": "nixl_connector.inc.md", }, ] class MetricExtractor(ast.NodeVisitor): """AST visitor to extract metric definitions.""" def __init__(self): self.metrics: list[dict[str, str]] = [] def visit_Call(self, node: ast.Call) -> None: """Visit function calls to find metric class instantiations.""" metric_type = self._get_metric_type(node) if metric_type: name = self._extract_kwarg(node, "name") documentation = self._extract_kwarg(node, "documentation") if name: self.metrics.append( { "name": name, "type": metric_type, "documentation": documentation or "", } ) self.generic_visit(node) def _get_metric_type(self, node: ast.Call) -> str | None: """Determine if this call creates a metric and return its type.""" metric_type_map = { "_gauge_cls": "gauge", "_counter_cls": "counter", "_histogram_cls": "histogram", } if isinstance(node.func, ast.Attribute): return metric_type_map.get(node.func.attr) return None def _extract_kwarg(self, node: ast.Call, key: str) -> str | None: """Extract a keyword argument value from a function call.""" for keyword in node.keywords: if keyword.arg == key: return self._get_string_value(keyword.value) return None def _get_string_value(self, node: ast.AST) -> str | None: """Extract string value from an AST node.""" if isinstance(node, ast.Constant): return 
str(node.value) if node.value is not None else None return None def extract_metrics_from_file(filepath: Path) -> list[dict[str, str]]: """Parse a Python file and extract all metric definitions.""" try: with open(filepath, encoding="utf-8") as f: source = f.read() tree = ast.parse(source, filename=str(filepath)) extractor = MetricExtractor() extractor.visit(tree) return extractor.metrics except Exception as e: raise RuntimeError(f"Failed to parse {filepath}: {e}") from e def generate_markdown_table(metrics: list[dict[str, str]]) -> str: """Generate a markdown table from extracted metrics.""" if not metrics: return "No metrics found.\n" # Sort by type, then by name metrics_sorted = sorted(metrics, key=lambda m: (m["type"], m["name"])) lines = [] lines.append("| Metric Name | Type | Description |") lines.append("|-------------|------|-------------|") for metric in metrics_sorted: name = metric["name"] metric_type = metric["type"].capitalize() doc = metric["documentation"].replace("\n", " ").strip() lines.append(f"| `{name}` | {metric_type} | {doc} |") return "\n".join(lines) + "\n" def on_startup(command: Literal["build", "gh-deploy", "serve"], dirty: bool): """Generate metrics documentation tables from source files.""" logger.info("Generating metrics documentation") # Create generated directory if it doesn't exist GENERATED_METRICS_DIR.mkdir(parents=True, exist_ok=True) total_metrics = 0 for source_config in METRIC_SOURCE_FILES: source_path = source_config["path"] output_file = source_config["output"] filepath = ROOT_DIR / source_path if not filepath.exists(): raise FileNotFoundError(f"Metrics source file not found: {filepath}") logger.debug("Extracting metrics from: %s", source_path) metrics = extract_metrics_from_file(filepath) logger.debug("Found %d metrics in %s", len(metrics), source_path) # Generate and write the markdown table for this source table_content = generate_markdown_table(metrics) output_path = GENERATED_METRICS_DIR / output_file with 
open(output_path, "w", encoding="utf-8") as f: f.write(table_content) total_metrics += len(metrics) logger.info( "Generated metrics table: %s (%d metrics)", output_path.relative_to(ROOT_DIR), len(metrics), ) logger.info( "Total metrics generated: %d across %d files", total_metrics, len(METRIC_SOURCE_FILES), )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/docs/mkdocs/hooks/generate_examples.py
docs/mkdocs/hooks/generate_examples.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import itertools import logging from dataclasses import dataclass from functools import cached_property from pathlib import Path from typing import Literal import regex as re logger = logging.getLogger("mkdocs") ROOT_DIR = Path(__file__).parent.parent.parent.parent ROOT_DIR_RELATIVE = "../../../../.." EXAMPLE_DIR = ROOT_DIR / "examples" EXAMPLE_DOC_DIR = ROOT_DIR / "docs/examples" def title(text: str) -> str: # Default title case text = text.replace("_", " ").replace("/", " - ").title() # Custom substitutions subs = { "io": "IO", "api": "API", "cli": "CLI", "cpu": "CPU", "llm": "LLM", "mae": "MAE", "ner": "NER", "tpu": "TPU", "gguf": "GGUF", "lora": "LoRA", "rlhf": "RLHF", "vllm": "vLLM", "openai": "OpenAI", "lmcache": "LMCache", "multilora": "MultiLoRA", "mlpspeculator": "MLPSpeculator", r"fp\d+": lambda x: x.group(0).upper(), # e.g. fp16, fp32 r"int\d+": lambda x: x.group(0).upper(), # e.g. int8, int16 } for pattern, repl in subs.items(): text = re.sub(rf"\b{pattern}\b", repl, text, flags=re.IGNORECASE) return text @dataclass class Example: """ Example class for generating documentation content from a given path. Attributes: path (Path): The path to the main directory or file. category (str): The category of the document. Properties:: main_file() -> Path | None: Determines the main file in the given path. other_files() -> list[Path]: Determines other files in the directory excluding the main file. title() -> str: Determines the title of the document. Methods: generate() -> str: Generates the documentation content. """ path: Path category: str @cached_property def main_file(self) -> Path | None: """Determines the main file in the given path. If path is a file, it returns the path itself. If path is a directory, it searches for Markdown files (*.md) in the directory and returns the first one found. 
If no Markdown files are found, it returns None.""" # Single file example if self.path.is_file(): return self.path # Multi file example with a README if md_paths := list(self.path.glob("*.md")): return md_paths[0] # Multi file example without a README return None @cached_property def other_files(self) -> list[Path]: """Determine other files in the directory excluding the main file. If path is a file, it returns an empty list. Otherwise, it returns every file in the directory except the main file in a list.""" # Single file example if self.path.is_file(): return [] # Multi file example is_other_file = lambda file: file.is_file() and file != self.main_file return sorted(file for file in self.path.rglob("*") if is_other_file(file)) @cached_property def is_code(self) -> bool: return self.main_file is not None and self.main_file.suffix != ".md" @cached_property def title(self) -> str: # Generate title from filename if no main md file found if self.main_file is None or self.is_code: return title(self.path.stem) # Specify encoding for building on Windows with open(self.main_file, encoding="utf-8") as f: first_line = f.readline().strip() match = re.match(r"^#\s+(?P<title>.+)$", first_line) if match: return match.group("title") raise ValueError(f"Title not found in {self.main_file}") def fix_relative_links(self, content: str) -> str: """ Fix relative links in markdown content by converting them to gh-file format. 
Args: content (str): The markdown content to process Returns: str: Content with relative links converted to gh-file format """ # Regex to match markdown links [text](relative_path) # This matches links that don't start with http, https, ftp, or # link_pattern = r"\[([^\]]*)\]\((?!(?:https?|ftp)://|#)([^)]+)\)" def replace_link(match): link_text = match.group(1) relative_path = match.group(2) # Make relative to repo root gh_file = (self.main_file.parent / relative_path).resolve() gh_file = gh_file.relative_to(ROOT_DIR) # Make GitHub URL url = "https://github.com/vllm-project/vllm/" url += "tree/main" if self.path.is_dir() else "blob/main" gh_url = f"{url}/{gh_file}" return f"[{link_text}]({gh_url})" return re.sub(link_pattern, replace_link, content) def generate(self) -> str: content = f"# {self.title}\n\n" url = "https://github.com/vllm-project/vllm/" url += "tree/main" if self.path.is_dir() else "blob/main" content += f"Source <{url}/{self.path.relative_to(ROOT_DIR)}>.\n\n" # Use long code fence to avoid issues with # included files containing code fences too code_fence = "``````" if self.main_file is not None: # Single file example or multi file example with a README if self.is_code: content += ( f"{code_fence}{self.main_file.suffix[1:]}\n" f'--8<-- "{self.main_file}"\n' f"{code_fence}\n" ) else: with open(self.main_file, encoding="utf-8") as f: # Skip the title from md snippets as it's been included above main_content = f.readlines()[1:] content += self.fix_relative_links("".join(main_content)) content += "\n" else: # Multi file example without a README for file in self.other_files: file_title = title(str(file.relative_to(self.path).with_suffix(""))) content += f"## {file_title}\n\n" content += ( f'{code_fence}{file.suffix[1:]}\n--8<-- "{file}"\n{code_fence}\n\n' ) return content if not self.other_files: return content content += "## Example materials\n\n" for file in self.other_files: content += f'??? 
abstract "{file.relative_to(self.path)}"\n' if file.suffix != ".md": content += f" {code_fence}{file.suffix[1:]}\n" content += f' --8<-- "{file}"\n' if file.suffix != ".md": content += f" {code_fence}\n" return content def on_startup(command: Literal["build", "gh-deploy", "serve"], dirty: bool): logger.info("Generating example documentation") logger.debug("Root directory: %s", ROOT_DIR.resolve()) logger.debug("Example directory: %s", EXAMPLE_DIR.resolve()) logger.debug("Example document directory: %s", EXAMPLE_DOC_DIR.resolve()) # Create the EXAMPLE_DOC_DIR if it doesn't exist if not EXAMPLE_DOC_DIR.exists(): EXAMPLE_DOC_DIR.mkdir(parents=True) categories = sorted(p for p in EXAMPLE_DIR.iterdir() if p.is_dir()) examples = [] glob_patterns = ["*.py", "*.md", "*.sh"] # Find categorised examples for category in categories: logger.info("Processing category: %s", category.stem) globs = [category.glob(pattern) for pattern in glob_patterns] for path in itertools.chain(*globs): examples.append(Example(path, category.stem)) # Find examples in subdirectories globs = [category.glob(f"*/{pattern}") for pattern in glob_patterns] for path in itertools.chain(*globs): examples.append(Example(path.parent, category.stem)) # Generate the example documentation for example in sorted(examples, key=lambda e: e.path.stem): example_name = f"{example.path.stem}.md" doc_path = EXAMPLE_DOC_DIR / example.category / example_name if not doc_path.parent.exists(): doc_path.parent.mkdir(parents=True) # Specify encoding for building on Windows with open(doc_path, "w+", encoding="utf-8") as f: f.write(example.generate()) logger.debug("Example generated: %s", doc_path.relative_to(ROOT_DIR)) logger.info("Total examples generated: %d", len(examples))
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/docs/mkdocs/hooks/url_schemes.py
docs/mkdocs/hooks/url_schemes.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ MkDocs hook to enable the following links to render correctly: - Relative file links outside of the `docs/` directory, e.g.: - [Text](../some_file.py) - [Directory](../../some_directory/) - GitHub URLs for issues, pull requests, and projects, e.g.: - Adds GitHub icon before links - Replaces raw links with descriptive text, e.g. <...pull/123> -> [Pull Request #123](.../pull/123) - Works for external repos too by including the `owner/repo` in the link title The goal is to simplify cross-referencing common GitHub resources in project docs. """ from pathlib import Path import regex as re from mkdocs.config.defaults import MkDocsConfig from mkdocs.structure.files import Files from mkdocs.structure.pages import Page ROOT_DIR = Path(__file__).parent.parent.parent.parent.resolve() DOC_DIR = ROOT_DIR / "docs" gh_icon = ":octicons-mark-github-16:" # Regex pieces TITLE = r"(?P<title>[^\[\]<>]+?)" REPO = r"(?P<repo>.+?/.+?)" TYPE = r"(?P<type>issues|pull|projects)" NUMBER = r"(?P<number>\d+)" PATH = r"(?P<path>[^\s]+?)" FRAGMENT = r"(?P<fragment>#[^\s]+)?" URL = f"https://github.com/{REPO}/{TYPE}/{NUMBER}{FRAGMENT}" RELATIVE = rf"(?!(https?|ftp)://|#){PATH}{FRAGMENT}" # Common titles to use for GitHub links when none is provided in the link. TITLES = {"issues": "Issue ", "pull": "Pull Request ", "projects": "Project "} # Regex to match GitHub issue, PR, and project links with optional titles. github_link = re.compile(rf"(\[{TITLE}\]\(|<){URL}(\)|>)") # Regex to match relative file links with optional titles. 
relative_link = re.compile(rf"\[{TITLE}\]\({RELATIVE}\)") def on_page_markdown( markdown: str, *, page: Page, config: MkDocsConfig, files: Files ) -> str: def replace_relative_link(match: re.Match) -> str: """Replace relative file links with URLs if they point outside the docs dir.""" title = match.group("title") path = match.group("path") path = (Path(page.file.abs_src_path).parent / path).resolve() fragment = match.group("fragment") or "" # Check if the path exists and is outside the docs dir if not path.exists() or path.is_relative_to(DOC_DIR): return match.group(0) # Files and directories have different URL schemes on GitHub slug = "tree/main" if path.is_dir() else "blob/main" path = path.relative_to(ROOT_DIR) url = f"https://github.com/vllm-project/vllm/{slug}/{path}{fragment}" return f"[{gh_icon} {title}]({url})" def replace_github_link(match: re.Match) -> str: """Replace GitHub issue, PR, and project links with enhanced Markdown links.""" repo = match.group("repo") type = match.group("type") number = match.group("number") # Title and fragment could be None title = match.group("title") or "" fragment = match.group("fragment") or "" # Use default titles for raw links if not title: title = TITLES[type] if "vllm-project" not in repo: title += repo title += f"#{number}" url = f"https://github.com/{repo}/{type}/{number}{fragment}" return f"[{gh_icon} {title}]({url})" markdown = relative_link.sub(replace_relative_link, markdown) markdown = github_link.sub(replace_github_link, markdown) return markdown
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/docs/mkdocs/hooks/generate_argparse.py
docs/mkdocs/hooks/generate_argparse.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import importlib.metadata import importlib.util import logging import sys import traceback from argparse import SUPPRESS, Action, HelpFormatter from collections.abc import Iterable from importlib.machinery import ModuleSpec from pathlib import Path from typing import TYPE_CHECKING, Literal from unittest.mock import MagicMock, patch from pydantic_core import core_schema logger = logging.getLogger("mkdocs") ROOT_DIR = Path(__file__).parent.parent.parent.parent ARGPARSE_DOC_DIR = ROOT_DIR / "docs/generated/argparse" sys.path.insert(0, str(ROOT_DIR)) def mock_if_no_torch(mock_module: str, mock: MagicMock): if not importlib.util.find_spec("torch"): sys.modules[mock_module] = mock # Mock custom op code class MockCustomOp: @staticmethod def register(name): def decorator(cls): return cls return decorator mock_if_no_torch("vllm._C", MagicMock()) mock_if_no_torch("vllm.model_executor.custom_op", MagicMock(CustomOp=MockCustomOp)) mock_if_no_torch( "vllm.utils.torch_utils", MagicMock(direct_register_custom_op=lambda *a, **k: None) ) # Mock any version checks by reading from compiled CI requirements with open(ROOT_DIR / "requirements/test.txt") as f: VERSIONS = dict(line.strip().split("==") for line in f if "==" in line) importlib.metadata.version = lambda name: VERSIONS.get(name) or "0.0.0" # Make torch.nn.Parameter safe to inherit from mock_if_no_torch("torch.nn", MagicMock(Parameter=object)) class PydanticMagicMock(MagicMock): """`MagicMock` that's able to generate pydantic-core schemas.""" def __init__(self, *args, **kwargs): name = kwargs.pop("name", None) super().__init__(*args, **kwargs) self.__spec__ = ModuleSpec(name, None) def __get_pydantic_core_schema__(self, source_type, handler): return core_schema.any_schema() def auto_mock(module_name: str, attr: str, max_mocks: int = 100): """Function that automatically mocks missing modules during imports.""" 
logger.info("Importing %s from %s", attr, module_name) for _ in range(max_mocks): try: module = importlib.import_module(module_name) # First treat attr as an attr, then as a submodule if hasattr(module, attr): return getattr(module, attr) return importlib.import_module(f"{module_name}.{attr}") except ModuleNotFoundError as e: assert e.name is not None logger.info("Mocking %s for argparse doc generation", e.name) sys.modules[e.name] = PydanticMagicMock(name=e.name) except Exception: logger.exception("Failed to import %s.%s: %s", module_name, attr) raise ImportError( f"Failed to import {module_name}.{attr} after mocking {max_mocks} imports" ) bench_latency = auto_mock("vllm.benchmarks", "latency") bench_mm_processor = auto_mock("vllm.benchmarks", "mm_processor") bench_serve = auto_mock("vllm.benchmarks", "serve") bench_sweep_plot = auto_mock("vllm.benchmarks.sweep.plot", "SweepPlotArgs") bench_sweep_plot_pareto = auto_mock( "vllm.benchmarks.sweep.plot_pareto", "SweepPlotParetoArgs" ) bench_sweep_serve = auto_mock("vllm.benchmarks.sweep.serve", "SweepServeArgs") bench_sweep_serve_sla = auto_mock( "vllm.benchmarks.sweep.serve_sla", "SweepServeSLAArgs" ) bench_throughput = auto_mock("vllm.benchmarks", "throughput") AsyncEngineArgs = auto_mock("vllm.engine.arg_utils", "AsyncEngineArgs") EngineArgs = auto_mock("vllm.engine.arg_utils", "EngineArgs") ChatCommand = auto_mock("vllm.entrypoints.cli.openai", "ChatCommand") CompleteCommand = auto_mock("vllm.entrypoints.cli.openai", "CompleteCommand") openai_cli_args = auto_mock("vllm.entrypoints.openai", "cli_args") openai_run_batch = auto_mock("vllm.entrypoints.openai", "run_batch") if TYPE_CHECKING: from vllm.utils.argparse_utils import FlexibleArgumentParser else: FlexibleArgumentParser = auto_mock( "vllm.utils.argparse_utils", "FlexibleArgumentParser" ) class MarkdownFormatter(HelpFormatter): """Custom formatter that generates markdown for argument groups.""" def __init__(self, prog: str, starting_heading_level: int = 3): 
super().__init__(prog, max_help_position=sys.maxsize, width=sys.maxsize) self._section_heading_prefix = "#" * starting_heading_level self._argument_heading_prefix = "#" * (starting_heading_level + 1) self._markdown_output = [] def start_section(self, heading: str): if heading not in {"positional arguments", "options"}: heading_md = f"\n{self._section_heading_prefix} {heading}\n\n" self._markdown_output.append(heading_md) def end_section(self): pass def add_text(self, text: str): if text: self._markdown_output.append(f"{text.strip()}\n\n") def add_usage(self, usage, actions, groups, prefix=None): pass def add_arguments(self, actions: Iterable[Action]): for action in actions: if len(action.option_strings) == 0 or "--help" in action.option_strings: continue option_strings = f"`{'`, `'.join(action.option_strings)}`" heading_md = f"{self._argument_heading_prefix} {option_strings}\n\n" self._markdown_output.append(heading_md) if choices := action.choices: choices = f"`{'`, `'.join(str(c) for c in choices)}`" self._markdown_output.append(f"Possible choices: {choices}\n\n") elif (metavar := action.metavar) and isinstance(metavar, (list, tuple)): metavar = f"`{'`, `'.join(str(m) for m in metavar)}`" self._markdown_output.append(f"Possible choices: {metavar}\n\n") if action.help: self._markdown_output.append(f"{action.help}\n\n") if (default := action.default) != SUPPRESS: # Make empty string defaults visible if default == "": default = '""' self._markdown_output.append(f"Default: `{default}`\n\n") def format_help(self): """Return the formatted help as markdown.""" return "".join(self._markdown_output) def create_parser(add_cli_args, **kwargs) -> FlexibleArgumentParser: """Create a parser for the given class with markdown formatting. Args: cls: The class to create a parser for **kwargs: Additional keyword arguments to pass to `cls.add_cli_args`. Returns: FlexibleArgumentParser: A parser with markdown formatting for the class. 
""" try: parser = FlexibleArgumentParser(add_json_tip=False) parser.formatter_class = MarkdownFormatter with patch("vllm.config.DeviceConfig.__post_init__"): _parser = add_cli_args(parser, **kwargs) except ModuleNotFoundError as e: # Auto-mock runtime imports if tb_list := traceback.extract_tb(e.__traceback__): path = Path(tb_list[-1].filename).relative_to(ROOT_DIR) auto_mock(module_name=".".join(path.parent.parts), attr=path.stem) return create_parser(add_cli_args, **kwargs) else: raise e # add_cli_args might be in-place so return parser if _parser is None return _parser or parser def on_startup(command: Literal["build", "gh-deploy", "serve"], dirty: bool): logger.info("Generating argparse documentation") logger.debug("Root directory: %s", ROOT_DIR.resolve()) logger.debug("Output directory: %s", ARGPARSE_DOC_DIR.resolve()) # Create the ARGPARSE_DOC_DIR if it doesn't exist if not ARGPARSE_DOC_DIR.exists(): ARGPARSE_DOC_DIR.mkdir(parents=True) # Create parsers to document parsers = { # Engine args "engine_args": create_parser(EngineArgs.add_cli_args), "async_engine_args": create_parser( AsyncEngineArgs.add_cli_args, async_args_only=True ), # CLI "serve": create_parser(openai_cli_args.make_arg_parser), "chat": create_parser(ChatCommand.add_cli_args), "complete": create_parser(CompleteCommand.add_cli_args), "run-batch": create_parser(openai_run_batch.make_arg_parser), # Benchmark CLI "bench_latency": create_parser(bench_latency.add_cli_args), "bench_mm_processor": create_parser(bench_mm_processor.add_cli_args), "bench_serve": create_parser(bench_serve.add_cli_args), "bench_sweep_plot": create_parser(bench_sweep_plot.add_cli_args), "bench_sweep_plot_pareto": create_parser(bench_sweep_plot_pareto.add_cli_args), "bench_sweep_serve": create_parser(bench_sweep_serve.add_cli_args), "bench_sweep_serve_sla": create_parser(bench_sweep_serve_sla.add_cli_args), "bench_throughput": create_parser(bench_throughput.add_cli_args), } # Generate documentation for each parser for stem, 
parser in parsers.items(): doc_path = ARGPARSE_DOC_DIR / f"{stem}.inc.md" # Specify encoding for building on Windows with open(doc_path, "w", encoding="utf-8") as f: f.write(super(type(parser), parser).format_help()) logger.info("Argparse generated: %s", doc_path.relative_to(ROOT_DIR)) if __name__ == "__main__": on_startup("build", False)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/docs/mkdocs/hooks/remove_announcement.py
docs/mkdocs/hooks/remove_announcement.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import os from pathlib import Path from typing import Literal def on_startup(command: Literal["build", "gh-deploy", "serve"], dirty: bool): # see https://docs.readthedocs.io/en/stable/reference/environment-variables.html # noqa if os.getenv("READTHEDOCS_VERSION_TYPE") == "tag": # remove the warning banner if the version is a tagged release mkdocs_dir = Path(__file__).parent.parent announcement_path = mkdocs_dir / "overrides/main.html" # The file might be removed already if the build is triggered multiple # times (readthedocs build both HTML and PDF versions separately) if announcement_path.exists(): os.remove(announcement_path)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/metrics.py
examples/offline_inference/metrics.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from vllm import LLM, SamplingParams from vllm.v1.metrics.reader import Counter, Gauge, Histogram, Vector # Sample prompts. prompts = [ "Hello, my name is", "The president of the United States is", "The capital of France is", "The future of AI is", ] # Create a sampling params object. sampling_params = SamplingParams(temperature=0.8, top_p=0.95) def main(): # Create an LLM. llm = LLM(model="facebook/opt-125m", disable_log_stats=False) # Generate texts from the prompts. outputs = llm.generate(prompts, sampling_params) # Print the outputs. print("-" * 50) for output in outputs: prompt = output.prompt generated_text = output.outputs[0].text print(f"Prompt: {prompt!r}\nGenerated text: {generated_text!r}") print("-" * 50) # Dump all metrics for metric in llm.get_metrics(): if isinstance(metric, Gauge): print(f"{metric.name} (gauge) = {metric.value}") elif isinstance(metric, Counter): print(f"{metric.name} (counter) = {metric.value}") elif isinstance(metric, Vector): print(f"{metric.name} (vector) = {metric.values}") elif isinstance(metric, Histogram): print(f"{metric.name} (histogram)") print(f" sum = {metric.sum}") print(f" count = {metric.count}") for bucket_le, value in metric.buckets.items(): print(f" {bucket_le} = {value}") if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/llm_engine_reset_kv.py
examples/offline_inference/llm_engine_reset_kv.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ This file demonstrates preempt requests when using the `LLMEngine` for processing prompts with various sampling parameters. """ import argparse from vllm import EngineArgs, LLMEngine, RequestOutput, SamplingParams from vllm.utils.argparse_utils import FlexibleArgumentParser def create_test_prompts() -> list[tuple[str, SamplingParams]]: """Create a list of test prompts with their sampling parameters.""" return [ ( "A robot may not injure a human being " * 50, SamplingParams( temperature=0.0, logprobs=1, prompt_logprobs=1, max_tokens=16 ), ), ( "A robot may not injure a human being " * 50, SamplingParams( temperature=0.0, logprobs=1, prompt_logprobs=1, max_tokens=16 ), ), ( "To be or not to be,", SamplingParams( temperature=0.8, top_k=5, presence_penalty=0.2, max_tokens=128 ), ), ( "What is the meaning of life?", SamplingParams( n=2, temperature=0.8, top_p=0.95, frequency_penalty=0.1, max_tokens=128 ), ), ] def process_requests(engine: LLMEngine, test_prompts: list[tuple[str, SamplingParams]]): """Continuously process a list of prompts and handle the outputs.""" request_id = 0 print("-" * 50) step_id = 0 while test_prompts or engine.has_unfinished_requests(): print("-" * 50) import os print(f"Step {step_id} (pid={os.getpid()})") if test_prompts: prompt, sampling_params = test_prompts.pop(0) engine.add_request(str(request_id), prompt, sampling_params) request_id += 1 if step_id == 10: print(f"Resetting prefix cache at {step_id}") engine.reset_prefix_cache(reset_running_requests=True) request_outputs: list[RequestOutput] = engine.step() for request_output in request_outputs: if request_output.finished: print("-" * 50) print(request_output) print("-" * 50) step_id += 1 def initialize_engine(args: argparse.Namespace) -> LLMEngine: """Initialize the LLMEngine from the command line arguments.""" engine_args = EngineArgs.from_cli_args(args) return 
LLMEngine.from_engine_args(engine_args) def parse_args(): parser = FlexibleArgumentParser( description="Demo on using the LLMEngine class directly" ) parser = EngineArgs.add_cli_args(parser) return parser.parse_args() def main(args: argparse.Namespace): """Main function that sets up and runs the prompt processing.""" engine = initialize_engine(args) test_prompts = create_test_prompts() process_requests(engine, test_prompts) if __name__ == "__main__": args = parse_args() main(args)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/vision_language.py
examples/offline_inference/vision_language.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ This example shows how to use vLLM for running offline inference with the correct prompt format on vision language models for text generation. For most models, the prompt format should follow corresponding examples on HuggingFace model repository. """ import os import random from contextlib import contextmanager from dataclasses import asdict from typing import NamedTuple from huggingface_hub import snapshot_download from transformers import AutoTokenizer from vllm import LLM, EngineArgs, SamplingParams from vllm.assets.image import ImageAsset from vllm.assets.video import VideoAsset from vllm.lora.request import LoRARequest from vllm.multimodal.image import convert_image_mode from vllm.utils.argparse_utils import FlexibleArgumentParser class ModelRequestData(NamedTuple): engine_args: EngineArgs prompts: list[str] stop_token_ids: list[int] | None = None lora_requests: list[LoRARequest] | None = None sampling_params: list[SamplingParams] | None = None # NOTE: The default `max_num_seqs` and `max_model_len` may result in OOM on # lower-end GPUs. # Unless specified, these settings have been tested to work on a single L4. 
# Aria def run_aria(questions: list[str], modality: str) -> ModelRequestData: assert modality == "image" model_name = "rhymes-ai/Aria" # NOTE: Need L40 (or equivalent) to avoid OOM engine_args = EngineArgs( model=model_name, max_model_len=4096, max_num_seqs=2, dtype="bfloat16", limit_mm_per_prompt={modality: 1}, ) prompts = [ ( f"<|im_start|>user\n<fim_prefix><|img|><fim_suffix>{question}" "<|im_end|>\n<|im_start|>assistant\n" ) for question in questions ] stop_token_ids = [93532, 93653, 944, 93421, 1019, 93653, 93519] return ModelRequestData( engine_args=engine_args, prompts=prompts, stop_token_ids=stop_token_ids, ) # Aya Vision def run_aya_vision(questions: list[str], modality: str) -> ModelRequestData: assert modality == "image" model_name = "CohereLabs/aya-vision-8b" engine_args = EngineArgs( model=model_name, max_model_len=2048, max_num_seqs=2, mm_processor_kwargs={"crop_to_patches": True}, limit_mm_per_prompt={modality: 1}, ) prompts = [ f"<|START_OF_TURN_TOKEN|><|USER_TOKEN|><image>{question}<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>" for question in questions ] return ModelRequestData( engine_args=engine_args, prompts=prompts, ) # Bee-8B def run_bee(questions: list[str], modality: str) -> ModelRequestData: assert modality == "image" model_name = "Open-Bee/Bee-8B-RL" prompts = [ ( f"<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n" f"<|im_start|>user\n<image>\n{question}<|im_end|>" f"<|im_start|>assistant\n<think>\n" ) for question in questions ] engine_args = EngineArgs( model=model_name, max_model_len=16384, limit_mm_per_prompt={modality: 1}, trust_remote_code=True, ) return ModelRequestData( engine_args=engine_args, prompts=prompts, ) def run_bagel(questions: list[str], modality: str) -> ModelRequestData: assert modality == "image" model_name = "ByteDance-Seed/BAGEL-7B-MoT" engine_args = EngineArgs( model=model_name, trust_remote_code=True, max_model_len=8192, max_num_seqs=2, limit_mm_per_prompt={modality: 1}, ) prompts = [ 
( f"<|im_start|>user\n<|image_pad|>\n{question}<|im_end|>\n" f"<|im_start|>assistant\n" ) for question in questions ] return ModelRequestData( engine_args=engine_args, prompts=prompts, ) # BLIP-2 def run_blip2(questions: list[str], modality: str) -> ModelRequestData: assert modality == "image" # BLIP-2 prompt format is inaccurate on HuggingFace model repository. # See https://huggingface.co/Salesforce/blip2-opt-2.7b/discussions/15#64ff02f3f8cf9e4f5b038262 #noqa prompts = [f"Question: {question} Answer:" for question in questions] engine_args = EngineArgs( model="Salesforce/blip2-opt-2.7b", limit_mm_per_prompt={modality: 1}, ) return ModelRequestData( engine_args=engine_args, prompts=prompts, ) # Chameleon def run_chameleon(questions: list[str], modality: str) -> ModelRequestData: assert modality == "image" prompts = [f"{question}<image>" for question in questions] engine_args = EngineArgs( model="facebook/chameleon-7b", max_model_len=4096, max_num_seqs=2, limit_mm_per_prompt={modality: 1}, ) return ModelRequestData( engine_args=engine_args, prompts=prompts, ) def run_command_a_vision(questions: list[str], modality: str) -> ModelRequestData: assert modality == "image" model_name = "CohereLabs/command-a-vision-07-2025" engine_args = EngineArgs( model=model_name, max_model_len=32768, tensor_parallel_size=4, limit_mm_per_prompt={modality: 1}, ) prompts = [ f"<|START_OF_TURN_TOKEN|><|USER_TOKEN|><|IMG_PATCH|>{question}<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>" for question in questions ] return ModelRequestData( engine_args=engine_args, prompts=prompts, ) # Deepseek-VL2 def run_deepseek_vl2(questions: list[str], modality: str) -> ModelRequestData: assert modality == "image" model_name = "deepseek-ai/deepseek-vl2-tiny" engine_args = EngineArgs( model=model_name, max_model_len=4096, max_num_seqs=2, hf_overrides={"architectures": ["DeepseekVLV2ForCausalLM"]}, limit_mm_per_prompt={modality: 1}, ) prompts = [ f"<|User|>: 
<image>\n{question}\n\n<|Assistant|>:" for question in questions ] return ModelRequestData( engine_args=engine_args, prompts=prompts, ) def run_deepseek_ocr(questions: list[str], modality: str) -> ModelRequestData: from vllm.model_executor.models.deepseek_ocr import NGramPerReqLogitsProcessor assert modality == "image" model_name = "deepseek-ai/DeepSeek-OCR" engine_args = EngineArgs( model=model_name, limit_mm_per_prompt={modality: 1}, logits_processors=[NGramPerReqLogitsProcessor], ) # deepseek-ocr use plain prompt template prompts = [f"<image>\n{question}" for question in questions] # The following sampling params config is taken from # the official Deepseek-OCR inference example. # (IMPORTANT) Use the custom logits processor and avoid skipping # special tokens for this model for the optimal OCR performance. sampling_params = [ SamplingParams( temperature=0.0, max_tokens=8192, # ngram logit processor args extra_args=dict( ngram_size=30, window_size=90, # whitelist: <td>, </td> whitelist_token_ids={128821, 128822}, ), skip_special_tokens=False, ) for _ in questions ] return ModelRequestData( engine_args=engine_args, prompts=prompts, sampling_params=sampling_params, ) # Dots-OCR def run_dots_ocr(questions: list[str], modality: str) -> ModelRequestData: assert modality == "image" prompts = [f"<|img|><|imgpad|><|endofimg|>{question}" for question in questions] engine_args = EngineArgs( model="rednote-hilab/dots.ocr", limit_mm_per_prompt={modality: 1}, trust_remote_code=True, ) return ModelRequestData( engine_args=engine_args, prompts=prompts, ) # Ernie4.5-VL def run_ernie45_vl(questions: list[str], modality: str) -> ModelRequestData: model_name = "baidu/ERNIE-4.5-VL-28B-A3B-PT" engine_args = EngineArgs( model=model_name, max_model_len=4096, max_num_seqs=5, limit_mm_per_prompt={modality: 1}, trust_remote_code=True, ) if modality == "image": placeholder = "Picture 1:<|IMAGE_START|><|image@placeholder|><|IMAGE_END|>" elif modality == "video": placeholder = "Video 
1:<|VIDEO_START|><|video@placeholder|><|VIDEO_END|>" prompts = [ ( f"<|begin_of_sentence|>User: {question}{placeholder}\n" "Assistant: <think></think>" ) for question in questions ] return ModelRequestData( engine_args=engine_args, prompts=prompts, ) # Fuyu def run_fuyu(questions: list[str], modality: str) -> ModelRequestData: assert modality == "image" prompts = [f"{question}\n" for question in questions] engine_args = EngineArgs( model="adept/fuyu-8b", max_model_len=2048, max_num_seqs=2, limit_mm_per_prompt={modality: 1}, ) return ModelRequestData( engine_args=engine_args, prompts=prompts, ) # Gemma 3 def run_gemma3(questions: list[str], modality: str) -> ModelRequestData: assert modality == "image" model_name = "google/gemma-3-4b-it" engine_args = EngineArgs( model=model_name, max_model_len=2048, max_num_seqs=2, mm_processor_kwargs={"do_pan_and_scan": True}, limit_mm_per_prompt={modality: 1}, ) prompts = [ ( "<bos><start_of_turn>user\n" f"<start_of_image>{question}<end_of_turn>\n" "<start_of_turn>model\n" ) for question in questions ] return ModelRequestData( engine_args=engine_args, prompts=prompts, ) # Gemma3N def run_gemma3n(questions: list[str], modality: str) -> ModelRequestData: assert modality == "image" model_name = "google/gemma-3n-E2B-it" engine_args = EngineArgs( model=model_name, max_model_len=2048, max_num_seqs=2, limit_mm_per_prompt={modality: 1}, enforce_eager=True, ) prompts = [ ( "<start_of_turn>user\n" f"<image_soft_token>{question}<end_of_turn>\n" "<start_of_turn>model\n" ) for question in questions ] return ModelRequestData( engine_args=engine_args, prompts=prompts, ) # GLM-4v def run_glm4v(questions: list[str], modality: str) -> ModelRequestData: assert modality == "image" model_name = "zai-org/glm-4v-9b" engine_args = EngineArgs( model=model_name, max_model_len=2048, max_num_seqs=2, trust_remote_code=True, enforce_eager=True, hf_overrides={"architectures": ["GLM4VForCausalLM"]}, limit_mm_per_prompt={modality: 1}, ) prompts = [ ( 
"<|user|>\n<|begin_of_image|><|endoftext|><|end_of_image|>" f"{question}<|assistant|>" ) for question in questions ] stop_token_ids = [151329, 151336, 151338] return ModelRequestData( engine_args=engine_args, prompts=prompts, stop_token_ids=stop_token_ids, ) # GLM-4.1V def run_glm4_1v(questions: list[str], modality: str) -> ModelRequestData: model_name = "zai-org/GLM-4.1V-9B-Thinking" engine_args = EngineArgs( model=model_name, max_model_len=4096, max_num_seqs=2, mm_processor_kwargs={ "size": {"shortest_edge": 12544, "longest_edge": 47040000}, "fps": 1, }, limit_mm_per_prompt={modality: 1}, enforce_eager=True, ) if modality == "image": placeholder = "<|begin_of_image|><|image|><|end_of_image|>" elif modality == "video": placeholder = "<|begin_of_video|><|video|><|end_of_video|>" prompts = [ ( "[gMASK]<sop><|system|>\nYou are a helpful assistant.<|user|>\n" f"{placeholder}" f"{question}<|assistant|>assistant\n" ) for question in questions ] return ModelRequestData( engine_args=engine_args, prompts=prompts, ) # GLM-4.5V def run_glm4_5v(questions: list[str], modality: str) -> ModelRequestData: model_name = "zai-org/GLM-4.5V" engine_args = EngineArgs( model=model_name, max_model_len=4096, max_num_seqs=2, mm_processor_kwargs={ "size": {"shortest_edge": 12544, "longest_edge": 47040000}, "fps": 1, }, limit_mm_per_prompt={modality: 1}, enforce_eager=True, tensor_parallel_size=4, ) if modality == "image": placeholder = "<|begin_of_image|><|image|><|end_of_image|>" elif modality == "video": placeholder = "<|begin_of_video|><|video|><|end_of_video|>" prompts = [ ( "[gMASK]<sop><|system|>\nYou are a helpful assistant.<|user|>\n" f"{placeholder}" f"{question}<|assistant|>assistant\n" ) for question in questions ] return ModelRequestData( engine_args=engine_args, prompts=prompts, ) # GLM-4.5V-FP8 def run_glm4_5v_fp8(questions: list[str], modality: str) -> ModelRequestData: model_name = "zai-org/GLM-4.5V-FP8" engine_args = EngineArgs( model=model_name, max_model_len=4096, 
max_num_seqs=2, mm_processor_kwargs={ "size": {"shortest_edge": 12544, "longest_edge": 47040000}, "fps": 1, }, limit_mm_per_prompt={modality: 1}, enforce_eager=True, tensor_parallel_size=4, ) if modality == "image": placeholder = "<|begin_of_image|><|image|><|end_of_image|>" elif modality == "video": placeholder = "<|begin_of_video|><|video|><|end_of_video|>" prompts = [ ( "[gMASK]<sop><|system|>\nYou are a helpful assistant.<|user|>\n" f"{placeholder}" f"{question}<|assistant|>assistant\n" ) for question in questions ] return ModelRequestData( engine_args=engine_args, prompts=prompts, ) # H2OVL-Mississippi def run_h2ovl(questions: list[str], modality: str) -> ModelRequestData: assert modality == "image" model_name = "h2oai/h2ovl-mississippi-800m" engine_args = EngineArgs( model=model_name, trust_remote_code=True, max_model_len=8192, limit_mm_per_prompt={modality: 1}, ) tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True) messages = [ [{"role": "user", "content": f"<image>\n{question}"}] for question in questions ] prompts = tokenizer.apply_chat_template( messages, tokenize=False, add_generation_prompt=True ) # Stop tokens for H2OVL-Mississippi # https://huggingface.co/h2oai/h2ovl-mississippi-800m stop_token_ids = [tokenizer.eos_token_id] return ModelRequestData( engine_args=engine_args, prompts=prompts, stop_token_ids=stop_token_ids, ) # HunyuanOCR def run_hunyuan_vl(questions: list[str], modality: str) -> ModelRequestData: assert modality == "image" model_name = "tencent/HunyuanOCR" engine_args = EngineArgs( model=model_name, max_model_len=8192, limit_mm_per_prompt={modality: 1}, ) placeholder = "<|hy_place▁holder▁no▁100|><|hy_place▁holder▁no▁102|><|hy_place▁holder▁no▁101|>" # noqa: E501 prompts = [ f"<|hy_begin▁of▁sentence|>{placeholder}{question}<|hy_User|>" for question in questions ] return ModelRequestData( engine_args=engine_args, prompts=prompts, stop_token_ids=None, ) # naver-hyperclovax/HyperCLOVAX-SEED-Vision-Instruct-3B def 
run_hyperclovax_seed_vision( questions: list[str], modality: str ) -> ModelRequestData: model_name = "naver-hyperclovax/HyperCLOVAX-SEED-Vision-Instruct-3B" tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True) engine_args = EngineArgs( model=model_name, trust_remote_code=True, max_model_len=8192 if modality == "image" else 16384, limit_mm_per_prompt={modality: 1}, ) messages = list() for question in questions: if modality == "image": """ ocr: List the words in the image in raster order. Even if the word order feels unnatural for reading, the model will handle it as long as it follows raster order. e.g. "Naver, CLOVA, bigshane" lens_keywords: List the entity names in the image. e.g. "iPhone" lens_local_keywords: List the entity names with quads in the image. e.g. "[0.07, 0.21, 0.92, 0.90] iPhone" """ messages.append( [ { "role": "user", "content": [ { "type": "image", "ocr": "", "lens_keywords": "", "lens_local_keywords": "", }, { "type": "text", "text": question, }, ], } ] ) elif modality == "video": messages.append( [ { "role": "user", "content": [ { "type": "video", }, { "type": "text", "text": question, }, ], } ] ) else: raise ValueError(f"Unsupported modality: {modality}") prompts = tokenizer.apply_chat_template( messages, tokenize=False, add_generation_prompt=True, ) return ModelRequestData( engine_args=engine_args, prompts=prompts, stop_token_ids=None, ) # Idefics3-8B-Llama3 def run_idefics3(questions: list[str], modality: str) -> ModelRequestData: assert modality == "image" model_name = "HuggingFaceM4/Idefics3-8B-Llama3" engine_args = EngineArgs( model=model_name, max_model_len=8192, max_num_seqs=2, enforce_eager=True, # if you are running out of memory, you can reduce the "longest_edge". 
# see: https://huggingface.co/HuggingFaceM4/Idefics3-8B-Llama3#model-optimizations mm_processor_kwargs={ "size": {"longest_edge": 3 * 364}, }, limit_mm_per_prompt={modality: 1}, ) prompts = [ (f"<|begin_of_text|>User:<image>{question}<end_of_utterance>\nAssistant:") for question in questions ] return ModelRequestData( engine_args=engine_args, prompts=prompts, ) # Intern-S1 def run_interns1(questions: list[str], modality: str) -> ModelRequestData: model_name = "internlm/Intern-S1-mini" engine_args = EngineArgs( model=model_name, trust_remote_code=True, max_model_len=8192, max_num_seqs=2, limit_mm_per_prompt={modality: 1}, enforce_eager=True, ) if modality == "image": placeholder = "<IMG_CONTEXT>" elif modality == "video": placeholder = "<video>" tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True) messages = [ [{"role": "user", "content": f"{placeholder}\n{question}"}] for question in questions ] prompts = tokenizer.apply_chat_template( messages, tokenize=False, add_generation_prompt=True ) return ModelRequestData( engine_args=engine_args, prompts=prompts, ) # InternVL def run_internvl(questions: list[str], modality: str) -> ModelRequestData: model_name = "OpenGVLab/InternVL3-2B" engine_args = EngineArgs( model=model_name, trust_remote_code=True, max_model_len=8192, limit_mm_per_prompt={modality: 1}, ) if modality == "image": placeholder = "<image>" elif modality == "video": placeholder = "<video>" tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True) messages = [ [{"role": "user", "content": f"{placeholder}\n{question}"}] for question in questions ] prompts = tokenizer.apply_chat_template( messages, tokenize=False, add_generation_prompt=True ) # Stop tokens for InternVL # models variants may have different stop tokens # please refer to the model card for the correct "stop words": # https://huggingface.co/OpenGVLab/InternVL2-2B/blob/main/conversation.py stop_tokens = ["<|endoftext|>", "<|im_start|>", "<|im_end|>", 
"<|end|>"] stop_token_ids = [tokenizer.convert_tokens_to_ids(i) for i in stop_tokens] stop_token_ids = [token_id for token_id in stop_token_ids if token_id is not None] return ModelRequestData( engine_args=engine_args, prompts=prompts, stop_token_ids=stop_token_ids, ) # Keye-VL def run_keye_vl(questions: list[str], modality: str) -> ModelRequestData: model_name = "Kwai-Keye/Keye-VL-8B-Preview" engine_args = EngineArgs( model=model_name, max_model_len=8192, trust_remote_code=True, limit_mm_per_prompt={modality: 1}, ) if modality == "image": placeholder = "<|image_pad|>" elif modality == "video": placeholder = "<|video_pad|>" prompts = [ ( f"<|im_start|>user\n<|vision_start|>{placeholder}<|vision_end|>" f"{question}<|im_end|>\n" "<|im_start|>assistant\n" ) for question in questions ] return ModelRequestData( engine_args=engine_args, prompts=prompts, ) # Keye-VL-1.5 def run_keye_vl1_5(questions: list[str], modality: str) -> ModelRequestData: model_name = "Kwai-Keye/Keye-VL-1.5-8B" engine_args = EngineArgs( model=model_name, max_model_len=8192, trust_remote_code=True, limit_mm_per_prompt={modality: 1}, ) if modality == "image": placeholder = "<|image_pad|>" elif modality == "video": placeholder = "<|video_pad|>" prompts = [ ( f"<|im_start|>user\n<|vision_start|>{placeholder}<|vision_end|>" f"{question}<|im_end|>\n" "<|im_start|>assistant\n" ) for question in questions ] return ModelRequestData( engine_args=engine_args, prompts=prompts, ) # Kimi-VL def run_kimi_vl(questions: list[str], modality: str) -> ModelRequestData: assert modality == "image" prompts = [ "<|im_user|>user<|im_middle|><|media_start|>image<|media_content|>" f"<|media_pad|><|media_end|>{question}<|im_end|>" "<|im_assistant|>assistant<|im_middle|>" for question in questions ] engine_args = EngineArgs( model="moonshotai/Kimi-VL-A3B-Instruct", trust_remote_code=True, max_model_len=4096, limit_mm_per_prompt={modality: 1}, ) return ModelRequestData( engine_args=engine_args, prompts=prompts, ) # LightOnOCR 
def run_lightonocr(questions: list[str], modality: str) -> ModelRequestData: assert modality == "image" prompts = [ "<|im_start|>system<|im_end|>\n<|im_start|>user\n<|image_pad|><|im_end|>\n<|im_start|>assistant\n" for _ in questions ] engine_args = EngineArgs( model="lightonai/LightOnOCR-1B", limit_mm_per_prompt={modality: 1}, ) return ModelRequestData( engine_args=engine_args, prompts=prompts, ) def run_llama4(questions: list[str], modality: str) -> ModelRequestData: assert modality == "image" model_name = "meta-llama/Llama-4-Scout-17B-16E-Instruct" engine_args = EngineArgs( model=model_name, max_model_len=8192, max_num_seqs=4, tensor_parallel_size=8, gpu_memory_utilization=0.4, limit_mm_per_prompt={modality: 1}, ) tokenizer = AutoTokenizer.from_pretrained(model_name) messages = [ [ { "role": "user", "content": [{"type": "image"}, {"type": "text", "text": f"{question}"}], } ] for question in questions ] prompts = tokenizer.apply_chat_template( messages, add_generation_prompt=True, tokenize=False ) stop_token_ids = None return ModelRequestData( engine_args=engine_args, prompts=prompts, stop_token_ids=stop_token_ids, ) # LLaVA-1.5 def run_llava(questions: list[str], modality: str) -> ModelRequestData: assert modality == "image" prompts = [f"USER: <image>\n{question}\nASSISTANT:" for question in questions] engine_args = EngineArgs( model="llava-hf/llava-1.5-7b-hf", max_model_len=4096, limit_mm_per_prompt={modality: 1}, ) return ModelRequestData( engine_args=engine_args, prompts=prompts, ) # LLaVA-1.6/LLaVA-NeXT def run_llava_next(questions: list[str], modality: str) -> ModelRequestData: assert modality == "image" prompts = [f"[INST] <image>\n{question} [/INST]" for question in questions] engine_args = EngineArgs( model="llava-hf/llava-v1.6-mistral-7b-hf", max_model_len=8192, limit_mm_per_prompt={modality: 1}, ) return ModelRequestData( engine_args=engine_args, prompts=prompts, ) # LlaVA-NeXT-Video # Currently only support for video input def 
run_llava_next_video(questions: list[str], modality: str) -> ModelRequestData: assert modality == "video" prompts = [f"USER: <video>\n{question} ASSISTANT:" for question in questions] engine_args = EngineArgs( model="llava-hf/LLaVA-NeXT-Video-7B-hf", max_model_len=8192, max_num_seqs=2, limit_mm_per_prompt={modality: 1}, ) return ModelRequestData( engine_args=engine_args, prompts=prompts, ) # LLaVA-OneVision def run_llava_onevision(questions: list[str], modality: str) -> ModelRequestData: if modality == "video": prompts = [ f"<|im_start|>user <video>\n{question}<|im_end|><|im_start|>assistant\n" for question in questions ] elif modality == "image": prompts = [ f"<|im_start|>user <image>\n{question}<|im_end|><|im_start|>assistant\n" for question in questions ] engine_args = EngineArgs( model="llava-hf/llava-onevision-qwen2-7b-ov-hf", max_model_len=16384, limit_mm_per_prompt={modality: 1}, ) return ModelRequestData( engine_args=engine_args, prompts=prompts, ) # Mantis def run_mantis(questions: list[str], modality: str) -> ModelRequestData: assert modality == "image" llama3_template = "<|start_header_id|>user<|end_header_id|>\n\n{}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n" # noqa: E501 prompts = [llama3_template.format(f"{question}\n<image>") for question in questions] engine_args = EngineArgs( model="TIGER-Lab/Mantis-8B-siglip-llama3", max_model_len=4096, hf_overrides={"architectures": ["MantisForConditionalGeneration"]}, limit_mm_per_prompt={modality: 1}, ) stop_token_ids = [128009] return ModelRequestData( engine_args=engine_args, prompts=prompts, stop_token_ids=stop_token_ids, ) # MiniCPM-V def run_minicpmv_base(questions: list[str], modality: str, model_name): assert modality in ["image", "video"] # If you want to use `MiniCPM-o-2_6` with audio inputs, check `audio_language.py` # noqa # 2.0 # The official repo doesn't work yet, so we need to use a fork for now # For more details, please see: See: 
https://github.com/vllm-project/vllm/pull/4087#issuecomment-2250397630 # noqa # model_name = "HwwwH/MiniCPM-V-2" # 2.5 # model_name = "openbmb/MiniCPM-Llama3-V-2_5" # 2.6 # model_name = "openbmb/MiniCPM-V-2_6" # o2.6 # modality supports # 2.0: image # 2.5: image # 2.6: image, video # o2.6: image, video, audio # model_name = "openbmb/MiniCPM-o-2_6" tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True) engine_args = EngineArgs( model=model_name, max_model_len=4096, max_num_seqs=2, trust_remote_code=True, limit_mm_per_prompt={modality: 1}, ) # NOTE The stop_token_ids are different for various versions of MiniCPM-V # 2.0 # stop_token_ids = [tokenizer.eos_id] # 2.5 # stop_token_ids = [tokenizer.eos_id, tokenizer.eot_id] # 2.6 / o2.6 stop_tokens = ["<|im_end|>", "<|endoftext|>"] stop_token_ids = [tokenizer.convert_tokens_to_ids(i) for i in stop_tokens] modality_placeholder = { "image": "(<image>./</image>)", "video": "(<video>./</video>)", } prompts = [ tokenizer.apply_chat_template( [ { "role": "user", "content": f"{modality_placeholder[modality]}\n{question}", } ], tokenize=False, add_generation_prompt=True, ) for question in questions ] return ModelRequestData( engine_args=engine_args, prompts=prompts, stop_token_ids=stop_token_ids, ) def run_minicpmo(questions: list[str], modality: str) -> ModelRequestData: return run_minicpmv_base(questions, modality, "openbmb/MiniCPM-o-2_6") def run_minicpmv(questions: list[str], modality: str) -> ModelRequestData: return run_minicpmv_base(questions, modality, "openbmb/MiniCPM-V-2_6") def run_minimax_vl_01(questions: list[str], modality: str) -> ModelRequestData: assert modality == "image" model_name = "MiniMaxAI/MiniMax-VL-01" engine_args = EngineArgs( model=model_name, max_num_seqs=2, limit_mm_per_prompt={modality: 1}, trust_remote_code=True, tensor_parallel_size=8, ) tokenizer = AutoTokenizer.from_pretrained(model_name) messages = [ [ { "role": "user", "content": [{"type": "image"}, {"type": "text", 
"text": question}], } ] for question in questions ] prompts = tokenizer.apply_chat_template( messages, add_generation_prompt=True, tokenize=False ) return ModelRequestData( engine_args=engine_args, prompts=prompts, ) # Mistral-3 HF-format def run_mistral3(questions: list[str], modality: str) -> ModelRequestData: assert modality == "image" model_name = "mistralai/Mistral-Small-3.1-24B-Instruct-2503" # NOTE: Need L40 (or equivalent) to avoid OOM engine_args = EngineArgs( model=model_name, max_model_len=8192, max_num_seqs=2, tensor_parallel_size=2, limit_mm_per_prompt={modality: 1}, ignore_patterns=["consolidated.safetensors"], ) prompts = [f"<s>[INST]{question}\n[IMG][/INST]" for question in questions] return ModelRequestData( engine_args=engine_args, prompts=prompts, ) # Molmo def run_molmo(questions: list[str], modality: str) -> ModelRequestData: assert modality == "image" model_name = "allenai/Molmo-7B-D-0924" engine_args = EngineArgs( model=model_name, trust_remote_code=True, dtype="bfloat16", limit_mm_per_prompt={modality: 1}, ) prompts = [ f"<|im_start|>user <image>\n{question}<|im_end|><|im_start|>assistant\n" for question in questions ] return ModelRequestData( engine_args=engine_args, prompts=prompts, ) # Nemontron_VL def run_nemotron_vl(questions: list[str], modality: str) -> ModelRequestData: model_name = "nvidia/Llama-3.1-Nemotron-Nano-VL-8B-V1"
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
true
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/mlpspeculator.py
examples/offline_inference/mlpspeculator.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ This file demonstrates the usage of text generation with an LLM model, comparing the performance with and without speculative decoding. Note that this example is out of date and not supported in vLLM v1. """ import gc import time from vllm import LLM, SamplingParams def time_generation( llm: LLM, prompts: list[str], sampling_params: SamplingParams, title: str ): # Generate texts from the prompts. The output is a list of RequestOutput # objects that contain the prompt, generated text, and other information. # Warmup first llm.generate(prompts, sampling_params) llm.generate(prompts, sampling_params) start = time.time() outputs = llm.generate(prompts, sampling_params) end = time.time() print("-" * 50) print(title) print("time: ", (end - start) / sum(len(o.outputs[0].token_ids) for o in outputs)) # Print the outputs. for output in outputs: generated_text = output.outputs[0].text print(f"text: {generated_text!r}") print("-" * 50) def main(): template = ( "Below is an instruction that describes a task. Write a response " "that appropriately completes the request.\n\n### Instruction:\n{}" "\n\n### Response:\n" ) # Sample prompts. prompts = [ "Write about the president of the United States.", ] prompts = [template.format(prompt) for prompt in prompts] # Create a sampling params object. sampling_params = SamplingParams(temperature=0.0, max_tokens=200) # Create an LLM without spec decoding llm = LLM(model="meta-llama/Llama-2-13b-chat-hf") time_generation(llm, prompts, sampling_params, "Without speculation") del llm gc.collect() # Create an LLM with spec decoding llm = LLM( model="meta-llama/Llama-2-13b-chat-hf", speculative_config={ "model": "ibm-ai-platform/llama-13b-accelerator", }, ) time_generation(llm, prompts, sampling_params, "With speculation") if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/skip_loading_weights_in_engine_init.py
examples/offline_inference/skip_loading_weights_in_engine_init.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from vllm import LLM, RequestOutput, SamplingParams # Sample prompts. prompts = [ "Hello, my name is", "The president of the United States is", "The capital of France is", "The future of AI is", ] # Create a sampling params object. sampling_params = SamplingParams(temperature=0.8, top_p=0.95) def print_prompts_and_outputs(outputs: list[RequestOutput]) -> None: print("-" * 60) for output in outputs: prompt = output.prompt generated_text = output.outputs[0].text print(f"Prompt: {prompt!r}") print(f"Output: {generated_text!r}") print("-" * 60) def main(): # Create an LLM without loading real weights llm = LLM( model="Qwen/Qwen3-0.6B", load_format="dummy", enforce_eager=True, tensor_parallel_size=4, ) outputs = llm.generate(prompts, sampling_params) print("\nOutputs do not make sense:") print_prompts_and_outputs(outputs) # Update load format from `dummy` to `auto` llm.collective_rpc( "update_config", args=({"load_config": {"load_format": "auto"}},) ) # Now reload real weights inplace llm.collective_rpc("reload_weights") # Check outputs make sense outputs = llm.generate(prompts, sampling_params) print("\nOutputs make sense after loading real weights:") print_prompts_and_outputs(outputs) if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/data_parallel.py
examples/offline_inference/data_parallel.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ Usage: Single node: python examples/offline_inference/data_parallel.py \ --model="ibm-research/PowerMoE-3b" \ -dp=2 \ -tp=2 Multi-node: Node 0 (assume the node has ip of 10.99.48.128): python examples/offline_inference/data_parallel.py \ --model="ibm-research/PowerMoE-3b" \ -dp=2 \ -tp=2 \ --dp-num-nodes=2 \ --dp-node-rank=0 \ --dp-master-addr=10.99.48.128 \ --dp-master-port=13345 Node 1: python examples/offline_inference/data_parallel.py \ --model="ibm-research/PowerMoE-3b" \ -dp=2 \ -tp=2 \ --dp-num-nodes=2 \ --dp-node-rank=1 \ --dp-master-addr=10.99.48.128 \ --dp-master-port=13345 """ import os from time import sleep from vllm import LLM, EngineArgs, SamplingParams from vllm.platforms import current_platform from vllm.utils.argparse_utils import FlexibleArgumentParser from vllm.utils.network_utils import get_open_port def create_parser(): parser = FlexibleArgumentParser(description="Data Parallel Inference") # Add all engine args EngineArgs.add_cli_args(parser) parser.set_defaults( model="ibm-research/PowerMoE-3b", enable_expert_parallel=True, ) # Add DP-specific args (separate from engine args to avoid conflicts) parser.add_argument( "--dp-num-nodes", type=int, default=1, help="Total number of nodes for data parallel.", ) parser.add_argument( "--dp-node-rank", type=int, default=0, help="Rank of the current node for data parallel.", ) parser.add_argument( "--dp-master-addr", type=str, default="", help="Master node IP address for DP coordination.", ) parser.add_argument( "--dp-master-port", type=int, default=0, help="Master node port for DP coordination.", ) parser.add_argument( "--timeout", type=int, default=300, help="Number of seconds before unresponsive process is killed.", ) return parser def main( dp_size, local_dp_rank, global_dp_rank, dp_master_ip, dp_master_port, engine_args, ): os.environ["VLLM_DP_RANK"] = str(global_dp_rank) 
os.environ["VLLM_DP_RANK_LOCAL"] = str(local_dp_rank) os.environ["VLLM_DP_SIZE"] = str(dp_size) os.environ["VLLM_DP_MASTER_IP"] = dp_master_ip os.environ["VLLM_DP_MASTER_PORT"] = str(dp_master_port) # CUDA_VISIBLE_DEVICES for each DP rank is set automatically inside the # engine processes. # Sample prompts. prompts = [ "Hello, my name is", "The president of the United States is", "The capital of France is", "The future of AI is", ] * 100 # with DP, each rank should process different prompts. # usually all the DP ranks process a full dataset, # and each rank processes a different part of the dataset. floor = len(prompts) // dp_size remainder = len(prompts) % dp_size # Distribute prompts into even groups. def start(rank): return rank * floor + min(rank, remainder) prompts = prompts[start(global_dp_rank) : start(global_dp_rank + 1)] if len(prompts) == 0: # if any rank has no prompts to process, # we need to set a placeholder prompt prompts = ["Placeholder"] print(f"DP rank {global_dp_rank} needs to process {len(prompts)} prompts") # Create a sampling params object. # since we are doing data parallel, every rank can have different # sampling params. here we set different max_tokens for different # ranks for demonstration. sampling_params = SamplingParams( temperature=0.8, top_p=0.95, max_tokens=[16, 20][global_dp_rank % 2] ) # Create an LLM. llm = LLM(**engine_args) outputs = llm.generate(prompts, sampling_params) # Print the outputs. for i, output in enumerate(outputs): if i >= 5: # print only 5 outputs break prompt = output.prompt generated_text = output.outputs[0].text print( f"DP rank {global_dp_rank}, Prompt: {prompt!r}, " f"Generated text: {generated_text!r}" ) # Give engines time to pause their processing loops before exiting. 
sleep(1) if __name__ == "__main__": parser = create_parser() args = vars(parser.parse_args()) # Extract DP-specific args (pop to remove from engine_args) dp_size = args.pop("data_parallel_size") dp_num_nodes = args.pop("dp_num_nodes") dp_node_rank = args.pop("dp_node_rank") dp_master_addr = args.pop("dp_master_addr") dp_master_port = args.pop("dp_master_port") timeout = args.pop("timeout") # Remaining args are engine args engine_args = args if dp_num_nodes == 1: dp_master_ip = "127.0.0.1" dp_master_port_val = get_open_port() else: dp_master_ip = dp_master_addr dp_master_port_val = dp_master_port assert dp_size % dp_num_nodes == 0, "dp_size should be divisible by dp_num_nodes" dp_per_node = dp_size // dp_num_nodes from multiprocessing import Process if current_platform.is_rocm(): from multiprocessing import set_start_method set_start_method("spawn", force=True) procs = [] for local_dp_rank, global_dp_rank in enumerate( range(dp_node_rank * dp_per_node, (dp_node_rank + 1) * dp_per_node) ): proc = Process( target=main, args=( dp_size, local_dp_rank, global_dp_rank, dp_master_ip, dp_master_port_val, engine_args, ), ) proc.start() procs.append(proc) exit_code = 0 for proc in procs: proc.join(timeout=timeout) if proc.exitcode is None: print(f"Killing process {proc.pid} that didn't stop within 5 minutes.") proc.kill() exit_code = 1 elif proc.exitcode: exit_code = proc.exitcode exit(exit_code)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/batch_llm_inference.py
examples/offline_inference/batch_llm_inference.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ This example shows how to use Ray Data for data parallel batch inference. Ray Data is a data processing framework that can process very large datasets with first-class support for vLLM. Ray Data provides functionality for: * Reading and writing to most popular file formats and cloud object storage. * Streaming execution, so you can run inference on datasets that far exceed the aggregate RAM of the cluster. * Scale up the workload without code changes. * Automatic sharding, load-balancing, and autoscaling across a Ray cluster, with built-in fault-tolerance and retry semantics. * Continuous batching that keeps vLLM replicas saturated and maximizes GPU utilization. * Compatible with tensor/pipeline parallel inference. Learn more about Ray Data's LLM integration: https://docs.ray.io/en/latest/data/working-with-llms.html """ import ray from packaging.version import Version from ray.data.llm import build_llm_processor, vLLMEngineProcessorConfig assert Version(ray.__version__) >= Version("2.44.1"), ( "Ray version must be at least 2.44.1" ) # Uncomment to reduce clutter in stdout # ray.init(log_to_driver=False) # ray.data.DataContext.get_current().enable_progress_bars = False # Read one text file from S3. Ray Data supports reading multiple files # from cloud storage (such as JSONL, Parquet, CSV, binary format). ds = ray.data.read_text("s3://anonymous@air-example-data/prompts.txt") print(ds.schema()) size = ds.count() print(f"Size of dataset: {size} prompts") # Configure vLLM engine. 
config = vLLMEngineProcessorConfig( model_source="unsloth/Llama-3.1-8B-Instruct", engine_kwargs={ "enable_chunked_prefill": True, "max_num_batched_tokens": 4096, "max_model_len": 16384, }, concurrency=1, # set the number of parallel vLLM replicas batch_size=64, ) # Create a Processor object, which will be used to # do batch inference on the dataset vllm_processor = build_llm_processor( config, preprocess=lambda row: dict( messages=[ {"role": "system", "content": "You are a bot that responds with haikus."}, {"role": "user", "content": row["text"]}, ], sampling_params=dict( temperature=0.3, max_tokens=250, ), ), postprocess=lambda row: dict( answer=row["generated_text"], **row, # This will return all the original columns in the dataset. ), ) ds = vllm_processor(ds) # Peek first 10 results. # NOTE: This is for local testing and debugging. For production use case, # one should write full result out as shown below. outputs = ds.take(limit=10) for output in outputs: prompt = output["prompt"] generated_text = output["generated_text"] print(f"Prompt: {prompt!r}") print(f"Generated text: {generated_text!r}") # Write inference output data out as Parquet files to S3. # Multiple files would be written to the output destination, # and each task would write one or more files separately. # # ds.write_parquet("s3://<your-output-bucket>")
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/rlhf.py
examples/offline_inference/rlhf.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ Demonstrates reinforcement learning from human feedback (RLHF) using vLLM and Ray. The script separates training and inference workloads onto distinct GPUs so that Ray can manage process placement and inter-process communication. A Hugging Face Transformer model occupies GPU 0 for training, whereas a tensor-parallel vLLM inference engine occupies GPU 1–2. The example performs the following steps: * Load the training model on GPU 0. * Split the inference model across GPUs 1–2 using vLLM's tensor parallelism and Ray placement groups. * Generate text from a list of prompts using the inference engine. * Update the weights of the training model and broadcast the updated weights to the inference engine by using a Ray collective RPC group. Note that for demonstration purposes we simply zero out the weights. For a production-ready implementation that supports multiple training and inference replicas, see the OpenRLHF framework: https://github.com/OpenRLHF/OpenRLHF This example assumes a single-node cluster with three GPUs, but Ray supports multi-node clusters. vLLM expects the GPUs are only used for vLLM workloads. Residual GPU activity interferes with vLLM memory profiling and causes unexpected behavior. """ import os import ray import torch from ray.util.placement_group import placement_group from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy from rlhf_utils import stateless_init_process_group from transformers import AutoModelForCausalLM from vllm import LLM, SamplingParams from vllm.utils.network_utils import get_ip, get_open_port class MyLLM(LLM): """Configure the vLLM worker for Ray placement group execution.""" def __init__(self, *args, **kwargs): # Remove the top-level CUDA_VISIBLE_DEVICES variable set by Ray # so that vLLM can manage its own device placement within the worker. 
os.environ.pop("CUDA_VISIBLE_DEVICES", None) super().__init__(*args, **kwargs) # Load the OPT-125M model onto GPU 0 for the training workload. train_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m") train_model.to("cuda:0") # Initialize Ray and set the visible devices. The vLLM engine will # be placed on GPUs 1 and 2. os.environ["CUDA_VISIBLE_DEVICES"] = "1,2" ray.init() # Create a placement group that reserves GPU 1–2 for the vLLM inference engine. # Learn more about Ray placement groups: # https://docs.ray.io/en/latest/ray-core/scheduling/placement-group.html pg_inference = placement_group([{"GPU": 1, "CPU": 0}] * 2) ray.get(pg_inference.ready()) scheduling_inference = PlacementGroupSchedulingStrategy( placement_group=pg_inference, placement_group_capture_child_tasks=True, placement_group_bundle_index=0, ) # Launch the vLLM inference engine. The `enforce_eager` flag reduces # start-up latency. llm = ray.remote( num_cpus=0, num_gpus=0, scheduling_strategy=scheduling_inference, )(MyLLM).remote( model="facebook/opt-125m", enforce_eager=True, worker_extension_cls="rlhf_utils.WorkerExtension", tensor_parallel_size=2, distributed_executor_backend="ray", ) # Generate text from the prompts. prompts = [ "Hello, my name is", "The president of the United States is", "The capital of France is", "The future of AI is", ] sampling_params = SamplingParams(temperature=0) outputs = ray.get(llm.generate.remote(prompts, sampling_params)) print("-" * 50) for output in outputs: prompt = output.prompt generated_text = output.outputs[0].text print(f"Prompt: {prompt!r}\nGenerated text: {generated_text!r}") print("-" * 50) # Set up the communication channel between the training process and the # inference engine. 
master_address = get_ip() master_port = get_open_port() handle = llm.collective_rpc.remote( "init_weight_update_group", args=(master_address, master_port, 1, 3) ) model_update_group = stateless_init_process_group( master_address, master_port, 0, 3, torch.device("cuda:0") ) ray.get(handle) # Simulate a training step by zeroing out all model weights. # In a real RLHF training loop the weights would be updated using the gradient # from an RL objective such as PPO on a reward model. for name, p in train_model.named_parameters(): p.data.zero_() # Synchronize the updated weights to the inference engine. for name, p in train_model.named_parameters(): dtype_name = str(p.dtype).split(".")[-1] handle = llm.collective_rpc.remote( "update_weight", args=(name, dtype_name, p.shape) ) model_update_group.broadcast(p, src=0, stream=torch.cuda.current_stream()) ray.get(handle) # Verify that the inference weights have been updated. assert all(ray.get(llm.collective_rpc.remote("check_weights_changed"))) # Generate text with the updated model. The output is expected to be nonsense # because the weights are zero. outputs_updated = ray.get(llm.generate.remote(prompts, sampling_params)) print("-" * 50) for output in outputs_updated: prompt = output.prompt generated_text = output.outputs[0].text print(f"Prompt: {prompt!r}\nGenerated text: {generated_text!r}") print("-" * 50)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/llm_engine_example.py
examples/offline_inference/llm_engine_example.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ This file demonstrates using the `LLMEngine` for processing prompts with various sampling parameters. """ import argparse from vllm import EngineArgs, LLMEngine, RequestOutput, SamplingParams from vllm.utils.argparse_utils import FlexibleArgumentParser def create_test_prompts() -> list[tuple[str, SamplingParams]]: """Create a list of test prompts with their sampling parameters.""" return [ ( "A robot may not injure a human being", SamplingParams(temperature=0.0, logprobs=1, prompt_logprobs=1), ), ( "To be or not to be,", SamplingParams(temperature=0.8, top_k=5, presence_penalty=0.2), ), ( "What is the meaning of life?", SamplingParams(n=2, temperature=0.8, top_p=0.95, frequency_penalty=0.1), ), ] def process_requests(engine: LLMEngine, test_prompts: list[tuple[str, SamplingParams]]): """Continuously process a list of prompts and handle the outputs.""" request_id = 0 print("-" * 50) while test_prompts or engine.has_unfinished_requests(): if test_prompts: prompt, sampling_params = test_prompts.pop(0) engine.add_request(str(request_id), prompt, sampling_params) request_id += 1 request_outputs: list[RequestOutput] = engine.step() for request_output in request_outputs: if request_output.finished: print(request_output) print("-" * 50) def initialize_engine(args: argparse.Namespace) -> LLMEngine: """Initialize the LLMEngine from the command line arguments.""" engine_args = EngineArgs.from_cli_args(args) return LLMEngine.from_engine_args(engine_args) def parse_args(): parser = FlexibleArgumentParser( description="Demo on using the LLMEngine class directly" ) parser = EngineArgs.add_cli_args(parser) return parser.parse_args() def main(args: argparse.Namespace): """Main function that sets up and runs the prompt processing.""" engine = initialize_engine(args) test_prompts = create_test_prompts() process_requests(engine, test_prompts) if __name__ == "__main__": args = 
parse_args() main(args)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/prefix_caching.py
examples/offline_inference/prefix_caching.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from vllm import LLM, SamplingParams from vllm.distributed import cleanup_dist_env_and_memory # NOTE: This is just a running example. For benchmarking purpose, # please see benchmarks/benchmark_prefix_caching.py # Common prefix. prefix = ( "You are an expert school principal, skilled in effectively managing " "faculty and staff. Draft 10-15 questions for a potential first grade " "Head Teacher for my K-12, all-girls', independent school that emphasizes " "community, joyful discovery, and life-long learning. The candidate is " "coming in for a first-round panel interview for a 8th grade Math " "teaching role. They have 5 years of previous teaching experience " "as an assistant teacher at a co-ed, public school with experience " "in middle school math teaching. Based on these information, fulfill " "the following paragraph: " ) # Sample prompts. prompts = [ "Hello, my name is", "The president of the United States is", "The capital of France is", "The future of AI is", ] generating_prompts = [prefix + prompt for prompt in prompts] # Create a sampling params object. sampling_params = SamplingParams(temperature=0.0) def main(): # Create an LLM without prefix caching as a baseline. regular_llm = LLM(model="facebook/opt-125m", gpu_memory_utilization=0.4) print("Results without `enable_prefix_caching`") # ruff: noqa: E501 # Generate texts from the prompts. The output is a list of RequestOutput objects # that contain the prompt, generated text, and other information. outputs = regular_llm.generate(generating_prompts, sampling_params) regular_generated_texts = [] # Print the outputs. print("-" * 50) for output in outputs: prompt = output.prompt generated_text = output.outputs[0].text regular_generated_texts.append(generated_text) print(f"Prompt: {prompt!r}\nGenerated text: {generated_text!r}") print("-" * 50) # Destroy the LLM object and free up the GPU memory. 
del regular_llm cleanup_dist_env_and_memory() # Create an LLM with prefix caching enabled. prefix_cached_llm = LLM( model="facebook/opt-125m", enable_prefix_caching=True, gpu_memory_utilization=0.4, ) # Warmup so that the shared prompt's KV cache is computed. prefix_cached_llm.generate(generating_prompts[0], sampling_params) # Generate with prefix caching. outputs = prefix_cached_llm.generate(generating_prompts, sampling_params) print("Results with `enable_prefix_caching`") cached_generated_texts = [] # Print the outputs. You should see the same outputs as before. print("-" * 50) for output in outputs: prompt = output.prompt generated_text = output.outputs[0].text cached_generated_texts.append(generated_text) print(f"Prompt: {prompt!r}\nGenerated text: {generated_text!r}") print("-" * 50) # Compare the results and display the speedup generated_same = all( [ regular_generated_texts[i] == cached_generated_texts[i] for i in range(len(prompts)) ] ) print(f"Generated answers are the same: {generated_same}") if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/rlhf_colocate.py
examples/offline_inference/rlhf_colocate.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ Demonstrates how to co-locate a vLLM inference worker and training actors on the same set of GPUs for reinforcement learning from human feedback (RLHF) workloads. Ray serves as the distributed execution framework in this example. Ray placement groups allocate both training actors and vLLM workers to the same GPU bundles, enabling fast, in-GPU communication between the two components. The script shows how to do the following: * Configure environment variables (`VLLM_RAY_PER_WORKER_GPUS` and `VLLM_RAY_BUNDLE_INDICES`) so that vLLM workers land on the desired devices. * Exchange tensors between processes by means of CUDA inter-process communication (IPC). CUDA IPC sidesteps NCCL limitations that occur when multiple processes share a single GPU. Note that this example assumes a single-node cluster with four GPUs, but Ray supports multi-node clusters. vLLM expects exclusive use of the GPUs during its initialization for memory profiling. Residual GPU activity interferes with vLLM memory profiling and causes unexpected behavior. Learn more about Ray placement groups: https://docs.ray.io/en/latest/placement-groups.html """ import gc import os import ray import torch import zmq from ray.util.placement_group import placement_group from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy from torch.multiprocessing.reductions import reduce_tensor from vllm import LLM class MyLLM(LLM): """Configure the vLLM worker for Ray placement group execution. The constructor sets environment variables that allow multiple vLLM workers to share a single physical GPU and that encode the bundle indices assigned by the placement group. Args: *args: Positional arguments forwarded to `vllm.LLM`. bundle_indices (list[int]): Placement-group bundle indices assigned to this worker. **kwargs: Keyword arguments forwarded to `vllm.LLM`. 
""" def __init__(self, *args, bundle_indices: list[int], **kwargs): # Prevent Ray from manipulating the top-level CUDA_VISIBLE_DEVICES variable # so that vLLM can its own device placement inside the worker. os.environ.pop("CUDA_VISIBLE_DEVICES", None) # Each worker uses 0.4 GPU so that two instances fit on the same GPUs. os.environ["VLLM_RAY_PER_WORKER_GPUS"] = "0.4" os.environ["VLLM_RAY_BUNDLE_INDICES"] = ",".join(map(str, bundle_indices)) print(f"creating LLM with bundle_indices={bundle_indices}") super().__init__(*args, **kwargs) class RayTrainingActor: """Training actor that hosts a Facebook OPT-125M model from Hugging Face. The model is loaded onto the first GPU assigned to this actor, and expose the CUDA IPC handles so that colocated vLLM workers can map tensors directly. """ def __init__(self): # Ray sets CUDA_VISIBLE_DEVICES to the GPUs assigned to this actor. from transformers import AutoModelForCausalLM self.model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m") self.model.to("cuda:0") # Zero out all the parameters. for name, p in self.model.named_parameters(): p.data.zero_() torch.cuda.synchronize() # The argument for `get_device_uuid` is the index of the GPU in the # list of visible devices. 
from vllm.platforms import current_platform self.device_uuid = current_platform.get_device_uuid(0) self.zmq_context = zmq.Context() self.zmq_address_counter = 0 self.zmq_handle = None def report_device_id(self) -> str: return self.device_uuid def get_zmq_handles(self) -> dict[str, str]: suffix = f"{self.device_uuid}-{self.zmq_address_counter}" self.zmq_handle = f"ipc:///tmp/rl-colocate-zmq-{suffix}.sock" self.zmq_address_counter += 1 return {self.device_uuid: self.zmq_handle} def update_weights(self): # align size to avoid misaligned address align_size = 256 def get_size(p: torch.Tensor) -> int: return (p.nbytes + align_size - 1) // align_size * align_size named_parameters: dict[str, torch.nn.Parameter] = dict( self.model.named_parameters() ) max_tensor_size = max(get_size(p) for p in named_parameters.values()) # use max_tensor_size * 2 as buffer size buffer = torch.empty(max_tensor_size * 2, dtype=torch.uint8, device="cuda:0") s = self.zmq_context.socket(zmq.REQ) s.bind(self.zmq_handle) handle = reduce_tensor(buffer) offset = 0 buckets: list[tuple[list[dict], list[torch.Tensor]]] = [] named_tensors: list[dict] = [] real_tensors: list[torch.Tensor] = [] for name, p in named_parameters.items(): size = get_size(p) if offset + size > buffer.numel(): buckets.append((named_tensors, real_tensors)) named_tensors, real_tensors = [], [] offset = 0 # assume tensors are contiguous named_tensors.append( {"name": name, "dtype": p.dtype, "shape": p.shape, "offset": offset} ) real_tensors.append(p) offset += size if named_tensors: buckets.append((named_tensors, real_tensors)) s.send_pyobj(handle) s.recv() for named_tensors, real_tensors in buckets: offset = 0 for p in real_tensors: buffer[offset : offset + p.nbytes].data.copy_( p.data.view(-1).view(dtype=torch.uint8), non_blocking=True ) offset += get_size(p) torch.cuda.synchronize() s.send_pyobj(named_tensors) s.recv() s.send_pyobj(None) s.recv() s.close() del buffer gc.collect() torch.cuda.empty_cache() # Ray manages four GPUs. 
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3" ray.init() # Co-locate vLLM instances and training actors on the same set of GPUs: # * GPU 0 and 1: training actor 0, training actor 1, and vLLM instance 0 # (tensor parallelism = 2). # * GPU 2 and 3: training actor 2, training actor 3, and vLLM instance 1 # (tensor parallelism = 2). pg = placement_group([{"GPU": 1, "CPU": 0}] * 4) ray.get(pg.ready()) print(f"placement group has bundles {pg.bundle_specs=}") training_actors = [] training_actor_device_ids = [] inference_engines = [] inference_engine_device_ids = [] for bundle_index in [0, 1, 2, 3]: training_actor = ray.remote( num_cpus=0, num_gpus=0.4, scheduling_strategy=PlacementGroupSchedulingStrategy( placement_group=pg, placement_group_capture_child_tasks=True, placement_group_bundle_index=bundle_index, ), )(RayTrainingActor).remote() training_actors.append(training_actor) for bundle_index, training_actor in enumerate(training_actors): device_id = ray.get(training_actor.report_device_id.remote()) print(f"training actor {bundle_index} is on {device_id}") training_actor_device_ids.append(device_id) for i, bundle_indices in enumerate([[0, 1], [2, 3]]): # Use the following syntax instead of the @ray.remote decorator so that # the placement group is customized for each bundle. llm = ray.remote( num_cpus=0, num_gpus=0, scheduling_strategy=PlacementGroupSchedulingStrategy( placement_group=pg, placement_group_capture_child_tasks=True, ), )(MyLLM).remote( model="facebook/opt-125m", enforce_eager=True, worker_extension_cls="rlhf_utils.ColocateWorkerExtension", tensor_parallel_size=2, distributed_executor_backend="ray", gpu_memory_utilization=0.4, bundle_indices=bundle_indices, ) inference_engines.append(llm) # Do not call any method on the inference engine at this point; the call # blocks until the vLLM instance finishes initialization. 
for i, llm in enumerate(inference_engines): inference_engine_device_ids.append( ray.get(llm.collective_rpc.remote("report_device_id", args=tuple())) ) print(f"inference engine {i} is on {inference_engine_device_ids[-1]}") # Verify placement: the first two training actors share the same GPUs as # the first inference engine. assert training_actor_device_ids[:2] == inference_engine_device_ids[0] # Verify placement: the last two training actors share the same GPUs as # the second inference engine. assert training_actor_device_ids[2:] == inference_engine_device_ids[1] print("Gather all the ZMQ handles from the training actors.") zmq_handles = {} for actor in training_actors: zmq_handles.update(ray.get(actor.get_zmq_handles.remote())) print(f"ZMQ handles: {zmq_handles}") print("Update the weights of the inference engines.") ray.get( [actor.update_weights.remote() for actor in training_actors] + [ llm.collective_rpc.remote("update_weights_from_ipc", args=(zmq_handles,)) for llm in inference_engines ] ) print("Check if the weights are updated.") for llm in inference_engines: assert ray.get(llm.collective_rpc.remote("check_weights_changed", args=tuple()))
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/rlhf_utils.py
examples/offline_inference/rlhf_utils.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import gc from collections.abc import Callable from typing import TypedDict import torch import zmq def stateless_init_process_group(master_address, master_port, rank, world_size, device): """ vLLM provides `StatelessProcessGroup` to create a process group without considering the global process group in torch.distributed. It is recommended to create `StatelessProcessGroup`, and then initialize the data-plane communication (NCCL) between external (train processes) and vLLM workers. """ from vllm.distributed.device_communicators.pynccl import PyNcclCommunicator from vllm.distributed.utils import StatelessProcessGroup pg = StatelessProcessGroup.create( host=master_address, port=master_port, rank=rank, world_size=world_size ) pynccl = PyNcclCommunicator(pg, device=device) return pynccl class WorkerExtension: """ The class for vLLM's worker to inherit from. By defining an extension class, the code can work no matter what is the underlying worker class. NOTE: we define this class in a separate module, and the main module should pass the full qualified name as `worker_extension_cls` argument. """ def init_weight_update_group( self, master_address, master_port, rank_offset, world_size ): from vllm.distributed.parallel_state import get_world_group rank = get_world_group().rank + rank_offset self.model_update_group = stateless_init_process_group( master_address, master_port, rank, world_size, self.device, ) def update_weight(self, name, dtype_name, shape): dtype = getattr(torch, dtype_name) weight = torch.empty(shape, dtype=dtype, device="cuda") self.model_update_group.broadcast( weight, src=0, stream=torch.cuda.current_stream() ) self.model_runner.model.load_weights(weights=[(name, weight)]) del weight def check_weights_changed(self): """ Check if the weights are updated to 0. 
""" weights_updated = True for name, p in self.model_runner.model.named_parameters(): weights_updated = weights_updated and torch.allclose(p, torch.zeros_like(p)) return weights_updated def rebuild_ipc( handle: tuple[Callable, tuple], device_id: int | None = None ) -> torch.Tensor: func, args = handle list_args = list(args) if device_id is not None: # the key is to change device id to the current device id # in case two processes have different CUDA_VISIBLE_DEVICES list_args[6] = device_id buffer = func(*list_args) return buffer class FlattenedTensorMetadata(TypedDict): name: str shape: torch.Size dtype: torch.dtype # specify the start offset of this tensor in shared ipc_buffer tensor offset: int class ColocateWorkerExtension: """ The class for vLLM's worker to inherit from, in the colocate setting. By defining an extension class, the code can work no matter what is the underlying worker class. NOTE: we define this class in a separate module, and the main module should pass the full qualified name as `worker_extension_cls` argument. """ def update_weights_from_ipc(self, zmq_handles: dict[str, str]): from vllm.model_executor.model_loader.utils import process_weights_after_loading assert self.device is not None if not hasattr(self, "_zmq_ctx") or self._zmq_ctx is None: self._zmq_ctx = zmq.Context() socket = self._zmq_ctx.socket(zmq.REP) socket.connect(zmq_handles[self.report_device_id()]) buffer: torch.Tensor | None = None while True: payload: tuple[Callable, tuple] | list[FlattenedTensorMetadata] | None = ( socket.recv_pyobj() ) if payload is None: # means the update is done process_weights_after_loading( self.model_runner.model, self.model_config, self.device ) torch.cuda.synchronize() socket.send(b"") break if isinstance(payload, tuple): # an ipc handle that vLLM can use `func, args = handle` # and `func(*args)` to rebuild GPU tensor. 
buffer = rebuild_ipc(payload, self.device.index) assert buffer.dtype == torch.uint8 socket.send(b"") continue assert isinstance(payload, list) assert buffer is not None weights = [] for item in payload: shape = item["shape"] if isinstance(shape, (list, tuple)): shape = torch.Size(shape) assert isinstance(shape, torch.Size) dtype, offset = item["dtype"], item["offset"] size = dtype.itemsize * shape.numel() tensor = buffer[offset : offset + size].view(dtype=dtype).view(shape) weights.append((item["name"], tensor)) self.model_runner.model.load_weights(weights=weights) del weights torch.cuda.synchronize() socket.send(b"") socket.close() del buffer gc.collect() torch.cuda.empty_cache() def report_device_id(self) -> str: from vllm.platforms import current_platform self.device_uuid = current_platform.get_device_uuid(self.device.index) return self.device_uuid def check_weights_changed(self): """ Check if the weights are updated to 0. """ weights_updated = True for name, p in self.model_runner.model.named_parameters(): weights_updated = weights_updated and torch.allclose(p, torch.zeros_like(p)) return weights_updated
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/vision_language_multi_image.py
examples/offline_inference/vision_language_multi_image.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ This example shows how to use vLLM for running offline inference with multi-image input on vision language models for text generation, using the chat template defined by the model. """ import os from argparse import Namespace from dataclasses import asdict from typing import NamedTuple from huggingface_hub import snapshot_download from PIL.Image import Image from transformers import AutoProcessor, AutoTokenizer from vllm import LLM, EngineArgs, SamplingParams from vllm.lora.request import LoRARequest from vllm.multimodal.utils import fetch_image from vllm.utils.argparse_utils import FlexibleArgumentParser QUESTION = "What is the content of each image?" IMAGE_URLS = [ "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/duck.jpg", "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/lion.jpg", "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/flycatcher.jpeg", "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/somefish.jpg", "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/starfish.jpg", "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/snail.jpg", "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/thistle.jpg", "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/husky.jpg", "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/orangetabbycat.jpg", "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/guineapig.jpg", "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/rabbit.jpg", "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/horsepony.jpg", ] class ModelRequestData(NamedTuple): engine_args: EngineArgs prompt: str image_data: list[Image] stop_token_ids: list[int] | None = None chat_template: str | None = None 
lora_requests: list[LoRARequest] | None = None sampling_params: SamplingParams | None = None # NOTE: The default `max_num_seqs` and `max_model_len` may result in OOM on # lower-end GPUs. # Unless specified, these settings have been tested to work on a single L4. def load_aria(question: str, image_urls: list[str]) -> ModelRequestData: model_name = "rhymes-ai/Aria" engine_args = EngineArgs( model=model_name, tokenizer_mode="slow", trust_remote_code=True, dtype="bfloat16", limit_mm_per_prompt={"image": len(image_urls)}, ) placeholders = "<fim_prefix><|img|><fim_suffix>\n" * len(image_urls) prompt = ( f"<|im_start|>user\n{placeholders}{question}<|im_end|>\n<|im_start|>assistant\n" ) stop_token_ids = [93532, 93653, 944, 93421, 1019, 93653, 93519] return ModelRequestData( engine_args=engine_args, prompt=prompt, stop_token_ids=stop_token_ids, image_data=[fetch_image(url) for url in image_urls], ) def load_aya_vision(question: str, image_urls: list[str]) -> ModelRequestData: model_name = "CohereLabs/aya-vision-8b" engine_args = EngineArgs( model=model_name, max_num_seqs=2, limit_mm_per_prompt={"image": len(image_urls)}, ) placeholders = [{"type": "image", "image": url} for url in image_urls] messages = [ { "role": "user", "content": [ *placeholders, {"type": "text", "text": question}, ], } ] processor = AutoProcessor.from_pretrained(model_name) prompt = processor.apply_chat_template( messages, tokenize=False, add_generation_prompt=True ) return ModelRequestData( engine_args=engine_args, prompt=prompt, image_data=[fetch_image(url) for url in image_urls], ) def load_bee(question: str, image_urls: list[str]) -> ModelRequestData: model_name = "Open-Bee/Bee-8B-RL" engine_args = EngineArgs( model=model_name, max_model_len=16384, max_num_seqs=16, limit_mm_per_prompt={"image": len(image_urls)}, trust_remote_code=True, ) placeholders = [{"type": "image", "image": url} for url in image_urls] messages = [ { "role": "user", "content": [ *placeholders, {"type": "text", "text": 
question}, ], } ] processor = AutoProcessor.from_pretrained(model_name, trust_remote_code=True) prompt = processor.apply_chat_template( messages, tokenize=False, add_generation_prompt=True ) return ModelRequestData( engine_args=engine_args, prompt=prompt, image_data=[fetch_image(url) for url in image_urls], ) def load_command_a_vision(question: str, image_urls: list[str]) -> ModelRequestData: model_name = "CohereLabs/command-a-vision-07-2025" # NOTE: This model is 122B parameters and requires tensor parallelism # Recommended to use tp=4 on H100 GPUs engine_args = EngineArgs( model=model_name, max_model_len=32768, tensor_parallel_size=4, limit_mm_per_prompt={"image": len(image_urls)}, ) placeholders = [{"type": "image", "image": url} for url in image_urls] messages = [ { "role": "user", "content": [ *placeholders, {"type": "text", "text": question}, ], } ] processor = AutoProcessor.from_pretrained(model_name) prompt = processor.apply_chat_template( messages, tokenize=False, add_generation_prompt=True ) return ModelRequestData( engine_args=engine_args, prompt=prompt, image_data=[fetch_image(url) for url in image_urls], ) def load_deepseek_vl2(question: str, image_urls: list[str]) -> ModelRequestData: model_name = "deepseek-ai/deepseek-vl2-tiny" engine_args = EngineArgs( model=model_name, max_model_len=4096, max_num_seqs=2, hf_overrides={"architectures": ["DeepseekVLV2ForCausalLM"]}, limit_mm_per_prompt={"image": len(image_urls)}, ) placeholder = "".join( f"image_{i}:<image>\n" for i, _ in enumerate(image_urls, start=1) ) prompt = f"<|User|>: {placeholder}{question}\n\n<|Assistant|>:" return ModelRequestData( engine_args=engine_args, prompt=prompt, image_data=[fetch_image(url) for url in image_urls], ) def load_deepseek_ocr(question: str, image_urls: list[str]) -> ModelRequestData: from vllm.model_executor.models.deepseek_ocr import NGramPerReqLogitsProcessor model_name = "deepseek-ai/DeepSeek-OCR" engine_args = EngineArgs( model=model_name, max_num_seqs=2, 
limit_mm_per_prompt={"image": len(image_urls)}, logits_processors=[NGramPerReqLogitsProcessor], ) placeholder = "<image>\n" * len(image_urls) prompt = placeholder + question # The following sampling params config is taken from # the official Deepseek-OCR inference example. # (IMPORTANT) Use the custom logits processor and avoid skipping # special tokens for this model for the optimal OCR performance. sampling_params = SamplingParams( temperature=0.0, max_tokens=8192, # ngram logit processor args extra_args=dict( ngram_size=30, window_size=90, # whitelist: <td>, </td> whitelist_token_ids={128821, 128822}, ), skip_special_tokens=False, ) return ModelRequestData( engine_args=engine_args, prompt=prompt, image_data=[fetch_image(url) for url in image_urls], sampling_params=sampling_params, ) def load_gemma3(question: str, image_urls: list[str]) -> ModelRequestData: model_name = "google/gemma-3-4b-it" engine_args = EngineArgs( model=model_name, max_model_len=8192, max_num_seqs=2, limit_mm_per_prompt={"image": len(image_urls)}, ) placeholders = [{"type": "image", "image": url} for url in image_urls] messages = [ { "role": "user", "content": [ *placeholders, {"type": "text", "text": question}, ], } ] processor = AutoProcessor.from_pretrained(model_name) prompt = processor.apply_chat_template( messages, tokenize=False, add_generation_prompt=True ) return ModelRequestData( engine_args=engine_args, prompt=prompt, image_data=[fetch_image(url) for url in image_urls], ) def load_h2ovl(question: str, image_urls: list[str]) -> ModelRequestData: model_name = "h2oai/h2ovl-mississippi-800m" engine_args = EngineArgs( model=model_name, trust_remote_code=True, max_model_len=8192, limit_mm_per_prompt={"image": len(image_urls)}, mm_processor_kwargs={"max_dynamic_patch": 4}, ) placeholders = "\n".join( f"Image-{i}: <image>\n" for i, _ in enumerate(image_urls, start=1) ) messages = [{"role": "user", "content": f"{placeholders}\n{question}"}] tokenizer = 
AutoTokenizer.from_pretrained(model_name, trust_remote_code=True) prompt = tokenizer.apply_chat_template( messages, tokenize=False, add_generation_prompt=True ) # Stop tokens for H2OVL-Mississippi # https://huggingface.co/h2oai/h2ovl-mississippi-800m stop_token_ids = [tokenizer.eos_token_id] return ModelRequestData( engine_args=engine_args, prompt=prompt, stop_token_ids=stop_token_ids, image_data=[fetch_image(url) for url in image_urls], ) # HunyuanOCR def load_hunyuan_vl(question: str, image_urls: list[str]) -> ModelRequestData: model_name = "tencent/HunyuanOCR" engine_args = EngineArgs( model=model_name, max_model_len=8192, limit_mm_per_prompt={"image": len(image_urls)}, ) placeholder = ( "<|hy_place▁holder▁no▁100|><|hy_place▁holder▁no▁102|><|hy_place▁holder▁no▁101|>" # noqa: E501 ) * len(image_urls) prompt = f"<|hy_begin▁of▁sentence|>{placeholder}{question}<|hy_User|>" return ModelRequestData( engine_args=engine_args, prompt=prompt, image_data=[fetch_image(url) for url in image_urls], ) def load_hyperclovax_seed_vision( question: str, image_urls: list[str] ) -> ModelRequestData: model_name = "naver-hyperclovax/HyperCLOVAX-SEED-Vision-Instruct-3B" tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True) engine_args = EngineArgs( model=model_name, trust_remote_code=True, max_model_len=16384, limit_mm_per_prompt={"image": len(image_urls)}, ) message = {"role": "user", "content": list()} for _image_url in image_urls: message["content"].append( { "type": "image", "image": _image_url, "ocr": "", "lens_keywords": "", "lens_local_keywords": "", } ) message["content"].append( { "type": "text", "text": question, } ) prompt = tokenizer.apply_chat_template( [ message, ], tokenize=False, add_generation_prompt=True, ) return ModelRequestData( engine_args=engine_args, prompt=prompt, stop_token_ids=None, image_data=[fetch_image(url) for url in image_urls], ) def load_idefics3(question: str, image_urls: list[str]) -> ModelRequestData: model_name = 
"HuggingFaceM4/Idefics3-8B-Llama3" # The configuration below has been confirmed to launch on a single L40 GPU. engine_args = EngineArgs( model=model_name, max_model_len=8192, max_num_seqs=16, enforce_eager=True, limit_mm_per_prompt={"image": len(image_urls)}, # if you are running out of memory, you can reduce the "longest_edge". # see: https://huggingface.co/HuggingFaceM4/Idefics3-8B-Llama3#model-optimizations mm_processor_kwargs={ "size": {"longest_edge": 2 * 364}, }, ) placeholders = "\n".join( f"Image-{i}: <image>\n" for i, _ in enumerate(image_urls, start=1) ) prompt = f"<|begin_of_text|>User:{placeholders}\n{question}<end_of_utterance>\nAssistant:" # noqa: E501 return ModelRequestData( engine_args=engine_args, prompt=prompt, image_data=[fetch_image(url) for url in image_urls], ) def load_interns1(question: str, image_urls: list[str]) -> ModelRequestData: model_name = "internlm/Intern-S1-mini" engine_args = EngineArgs( model=model_name, trust_remote_code=True, max_model_len=4096, limit_mm_per_prompt={"image": len(image_urls)}, ) placeholders = "\n".join( f"Image-{i}: <IMG_CONTEXT>\n" for i, _ in enumerate(image_urls, start=1) ) messages = [{"role": "user", "content": f"{placeholders}\n{question}"}] tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True) prompt = tokenizer.apply_chat_template( messages, tokenize=False, add_generation_prompt=True ) return ModelRequestData( engine_args=engine_args, prompt=prompt, image_data=[fetch_image(url) for url in image_urls], ) def load_internvl(question: str, image_urls: list[str]) -> ModelRequestData: model_name = "OpenGVLab/InternVL2-2B" engine_args = EngineArgs( model=model_name, trust_remote_code=True, max_model_len=4096, limit_mm_per_prompt={"image": len(image_urls)}, mm_processor_kwargs={"max_dynamic_patch": 4}, ) placeholders = "\n".join( f"Image-{i}: <image>\n" for i, _ in enumerate(image_urls, start=1) ) messages = [{"role": "user", "content": f"{placeholders}\n{question}"}] tokenizer = 
AutoTokenizer.from_pretrained(model_name, trust_remote_code=True) prompt = tokenizer.apply_chat_template( messages, tokenize=False, add_generation_prompt=True ) # Stop tokens for InternVL # models variants may have different stop tokens # please refer to the model card for the correct "stop words": # https://huggingface.co/OpenGVLab/InternVL2-2B/blob/main/conversation.py stop_tokens = ["<|endoftext|>", "<|im_start|>", "<|im_end|>", "<|end|>"] stop_token_ids = [tokenizer.convert_tokens_to_ids(i) for i in stop_tokens] return ModelRequestData( engine_args=engine_args, prompt=prompt, stop_token_ids=stop_token_ids, image_data=[fetch_image(url) for url in image_urls], ) def load_keye_vl(question: str, image_urls: list[str]) -> ModelRequestData: model_name = "Kwai-Keye/Keye-VL-8B-Preview" engine_args = EngineArgs( model=model_name, trust_remote_code=True, max_model_len=8192, max_num_seqs=5, limit_mm_per_prompt={"image": len(image_urls)}, ) placeholders = [{"type": "image", "image": url} for url in image_urls] messages = [ { "role": "user", "content": [ *placeholders, {"type": "text", "text": question}, ], }, ] processor = AutoProcessor.from_pretrained(model_name, trust_remote_code=True) prompt = processor.apply_chat_template( messages, tokenize=False, add_generation_prompt=True ) image_data = [fetch_image(url) for url in image_urls] return ModelRequestData( engine_args=engine_args, prompt=prompt, image_data=image_data, ) def load_keye_vl1_5(question: str, image_urls: list[str]) -> ModelRequestData: model_name = "Kwai-Keye/Keye-VL-1_5-8B" engine_args = EngineArgs( model=model_name, trust_remote_code=True, max_model_len=32768, max_num_seqs=5, limit_mm_per_prompt={"image": len(image_urls)}, ) placeholders = [{"type": "image", "image": url} for url in image_urls] messages = [ { "role": "user", "content": [ *placeholders, {"type": "text", "text": question}, ], }, ] processor = AutoProcessor.from_pretrained(model_name, trust_remote_code=True) prompt = 
processor.apply_chat_template( messages, tokenize=False, add_generation_prompt=True ) image_data = [fetch_image(url) for url in image_urls] return ModelRequestData( engine_args=engine_args, prompt=prompt, image_data=image_data, ) def load_kimi_vl(question: str, image_urls: list[str]) -> ModelRequestData: model_name = "moonshotai/Kimi-VL-A3B-Instruct" engine_args = EngineArgs( model=model_name, trust_remote_code=True, max_model_len=4096, max_num_seqs=4, limit_mm_per_prompt={"image": len(image_urls)}, ) placeholders = [{"type": "image", "image": url} for url in image_urls] messages = [ { "role": "user", "content": [ *placeholders, {"type": "text", "text": question}, ], } ] processor = AutoProcessor.from_pretrained(model_name, trust_remote_code=True) prompt = processor.apply_chat_template( messages, tokenize=False, add_generation_prompt=True ) return ModelRequestData( engine_args=engine_args, prompt=prompt, image_data=[fetch_image(url) for url in image_urls], ) def load_llama4(question: str, image_urls: list[str]) -> ModelRequestData: model_name = "meta-llama/Llama-4-Scout-17B-16E-Instruct" engine_args = EngineArgs( model=model_name, max_model_len=131072, tensor_parallel_size=8, limit_mm_per_prompt={"image": len(image_urls)}, ) placeholders = [{"type": "image", "image": url} for url in image_urls] messages = [ { "role": "user", "content": [ *placeholders, {"type": "text", "text": question}, ], } ] processor = AutoProcessor.from_pretrained(model_name) prompt = processor.apply_chat_template( messages, tokenize=False, add_generation_prompt=True ) return ModelRequestData( engine_args=engine_args, prompt=prompt, image_data=[fetch_image(url) for url in image_urls], ) def load_llava(question: str, image_urls: list[str]) -> ModelRequestData: # NOTE: CAUTION! Original Llava models wasn't really trained on multi-image inputs, # it will generate poor response for multi-image inputs! 
model_name = "llava-hf/llava-1.5-7b-hf" engine_args = EngineArgs( model=model_name, max_num_seqs=16, limit_mm_per_prompt={"image": len(image_urls)}, ) placeholders = [{"type": "image", "image": url} for url in image_urls] messages = [ { "role": "user", "content": [ *placeholders, {"type": "text", "text": question}, ], } ] processor = AutoProcessor.from_pretrained(model_name) prompt = processor.apply_chat_template( messages, tokenize=False, add_generation_prompt=True ) return ModelRequestData( engine_args=engine_args, prompt=prompt, image_data=[fetch_image(url) for url in image_urls], ) def load_llava_next(question: str, image_urls: list[str]) -> ModelRequestData: model_name = "llava-hf/llava-v1.6-mistral-7b-hf" engine_args = EngineArgs( model=model_name, max_model_len=8192, max_num_seqs=16, limit_mm_per_prompt={"image": len(image_urls)}, ) placeholders = [{"type": "image", "image": url} for url in image_urls] messages = [ { "role": "user", "content": [ *placeholders, {"type": "text", "text": question}, ], } ] processor = AutoProcessor.from_pretrained(model_name) prompt = processor.apply_chat_template( messages, tokenize=False, add_generation_prompt=True ) return ModelRequestData( engine_args=engine_args, prompt=prompt, image_data=[fetch_image(url) for url in image_urls], ) def load_llava_onevision(question: str, image_urls: list[str]) -> ModelRequestData: model_name = "llava-hf/llava-onevision-qwen2-7b-ov-hf" engine_args = EngineArgs( model=model_name, max_model_len=16384, max_num_seqs=16, limit_mm_per_prompt={"image": len(image_urls)}, ) placeholders = [{"type": "image", "image": url} for url in image_urls] messages = [ { "role": "user", "content": [ *placeholders, {"type": "text", "text": question}, ], } ] processor = AutoProcessor.from_pretrained(model_name) prompt = processor.apply_chat_template( messages, tokenize=False, add_generation_prompt=True ) return ModelRequestData( engine_args=engine_args, prompt=prompt, image_data=[fetch_image(url) for url in 
image_urls], ) def load_mistral3(question: str, image_urls: list[str]) -> ModelRequestData: model_name = "mistralai/Mistral-Small-3.1-24B-Instruct-2503" # Adjust this as necessary to fit in GPU engine_args = EngineArgs( model=model_name, max_model_len=8192, max_num_seqs=2, tensor_parallel_size=2, limit_mm_per_prompt={"image": len(image_urls)}, ignore_patterns=["consolidated.safetensors"], ) placeholders = "[IMG]" * len(image_urls) prompt = f"<s>[INST]{question}\n{placeholders}[/INST]" return ModelRequestData( engine_args=engine_args, prompt=prompt, image_data=[fetch_image(url) for url in image_urls], ) def load_nvlm_d(question: str, image_urls: list[str]) -> ModelRequestData: model_name = "nvidia/NVLM-D-72B" # Adjust this as necessary to fit in GPU engine_args = EngineArgs( model=model_name, trust_remote_code=True, max_model_len=8192, tensor_parallel_size=4, limit_mm_per_prompt={"image": len(image_urls)}, mm_processor_kwargs={"max_dynamic_patch": 4}, ) placeholders = "\n".join( f"Image-{i}: <image>\n" for i, _ in enumerate(image_urls, start=1) ) messages = [{"role": "user", "content": f"{placeholders}\n{question}"}] tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True) prompt = tokenizer.apply_chat_template( messages, tokenize=False, add_generation_prompt=True ) return ModelRequestData( engine_args=engine_args, prompt=prompt, image_data=[fetch_image(url) for url in image_urls], ) # Ovis def load_ovis(question: str, image_urls: list[str]) -> ModelRequestData: model_name = "AIDC-AI/Ovis2-1B" engine_args = EngineArgs( model=model_name, max_model_len=8192, max_num_seqs=2, trust_remote_code=True, dtype="half", limit_mm_per_prompt={"image": len(image_urls)}, ) placeholders = "\n".join( f"Image-{i}: <image>\n" for i, _ in enumerate(image_urls, start=1) ) messages = [{"role": "user", "content": f"{placeholders}\n{question}"}] tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True) prompt = tokenizer.apply_chat_template( 
messages, tokenize=False, add_generation_prompt=True ) return ModelRequestData( engine_args=engine_args, prompt=prompt, image_data=[fetch_image(url) for url in image_urls], ) # ovis2_5 def load_ovis2_5(question: str, image_urls: list[str]) -> ModelRequestData: model_name = "AIDC-AI/Ovis2.5-2B" engine_args = EngineArgs( model=model_name, max_model_len=8192, max_num_seqs=2, trust_remote_code=True, dtype="half", limit_mm_per_prompt={"image": len(image_urls)}, ) placeholders = "\n".join( f"Image-{i}: <image>\n" for i, _ in enumerate(image_urls, start=1) ) prompt = ( f"<|im_start|>user\n\n{placeholders}\n{question}<|im_end|>\n" "<|im_start|>assistant\n" ) return ModelRequestData( engine_args=engine_args, prompt=prompt, image_data=[fetch_image(url) for url in image_urls], ) def load_paddleocr_vl(question: str, image_urls: list[str]) -> ModelRequestData: model_name = "PaddlePaddle/PaddleOCR-VL" engine_args = EngineArgs( model=model_name, trust_remote_code=True, max_model_len=8192, max_num_seqs=2, limit_mm_per_prompt={"image": len(image_urls)}, ) placeholders = "<|IMAGE_START|><|IMAGE_PLACEHOLDER|><|IMAGE_END|>" * len(image_urls) prompt = f"<|begin_of_sentence|>User: {question}{placeholders}\nAssistant: " return ModelRequestData( engine_args=engine_args, prompt=prompt, image_data=[fetch_image(url) for url in image_urls], ) def load_pixtral_hf(question: str, image_urls: list[str]) -> ModelRequestData: model_name = "mistral-community/pixtral-12b" # Adjust this as necessary to fit in GPU engine_args = EngineArgs( model=model_name, max_model_len=8192, max_num_seqs=2, tensor_parallel_size=2, limit_mm_per_prompt={"image": len(image_urls)}, ) placeholders = "[IMG]" * len(image_urls) prompt = f"<s>[INST]{question}\n{placeholders}[/INST]" return ModelRequestData( engine_args=engine_args, prompt=prompt, image_data=[fetch_image(url) for url in image_urls], ) def load_phi3v(question: str, image_urls: list[str]) -> ModelRequestData: # num_crops is an override kwarg to the multimodal 
image processor; # For some models, e.g., Phi-3.5-vision-instruct, it is recommended # to use 16 for single frame scenarios, and 4 for multi-frame. # # Generally speaking, a larger value for num_crops results in more # tokens per image instance, because it may scale the image more in # the image preprocessing. Some references in the model docs and the # formula for image tokens after the preprocessing # transform can be found below. # # https://huggingface.co/microsoft/Phi-3.5-vision-instruct#loading-the-model-locally # https://huggingface.co/microsoft/Phi-3.5-vision-instruct/blob/main/processing_phi3_v.py#L194 engine_args = EngineArgs( model="microsoft/Phi-3.5-vision-instruct", trust_remote_code=True, max_model_len=4096, max_num_seqs=2, limit_mm_per_prompt={"image": len(image_urls)}, mm_processor_kwargs={"num_crops": 4}, ) placeholders = "\n".join( f"<|image_{i}|>" for i, _ in enumerate(image_urls, start=1) ) prompt = f"<|user|>\n{placeholders}\n{question}<|end|>\n<|assistant|>\n" return ModelRequestData( engine_args=engine_args, prompt=prompt, image_data=[fetch_image(url) for url in image_urls], ) def load_phi4mm(question: str, image_urls: list[str]) -> ModelRequestData: """ Phi-4-multimodal-instruct supports both image and audio inputs. Here, we show how to process multi images inputs. """ model_path = snapshot_download("microsoft/Phi-4-multimodal-instruct") # Since the vision-lora and speech-lora co-exist with the base model, # we have to manually specify the path of the lora weights. 
vision_lora_path = os.path.join(model_path, "vision-lora") engine_args = EngineArgs( model=model_path, trust_remote_code=True, max_model_len=4096, max_num_seqs=2, limit_mm_per_prompt={"image": len(image_urls)}, enable_lora=True, max_lora_rank=320, # Note - mm_processor_kwargs can also be passed to generate/chat calls mm_processor_kwargs={"dynamic_hd": 4}, ) placeholders = "".join(f"<|image_{i}|>" for i, _ in enumerate(image_urls, start=1)) prompt = f"<|user|>{placeholders}{question}<|end|><|assistant|>" return ModelRequestData( engine_args=engine_args, prompt=prompt, image_data=[fetch_image(url) for url in image_urls], lora_requests=[LoRARequest("vision", 1, vision_lora_path)], ) def load_qwen_vl_chat(question: str, image_urls: list[str]) -> ModelRequestData: model_name = "Qwen/Qwen-VL-Chat" engine_args = EngineArgs( model=model_name, trust_remote_code=True, max_model_len=1024, max_num_seqs=2, hf_overrides={"architectures": ["QwenVLForConditionalGeneration"]}, limit_mm_per_prompt={"image": len(image_urls)}, ) placeholders = "".join( f"Picture {i}: <img></img>\n" for i, _ in enumerate(image_urls, start=1) ) # This model does not have a chat_template attribute on its tokenizer, # so we need to explicitly pass it. 
We use ChatML since it's used in the # generation utils of the model: # https://huggingface.co/Qwen/Qwen-VL-Chat/blob/main/qwen_generation_utils.py#L265 tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True) # Copied from: https://huggingface.co/docs/transformers/main/en/chat_templating chat_template = "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}" # noqa: E501 messages = [{"role": "user", "content": f"{placeholders}\n{question}"}] prompt = tokenizer.apply_chat_template( messages, tokenize=False, add_generation_prompt=True, chat_template=chat_template, ) stop_tokens = ["<|endoftext|>", "<|im_start|>", "<|im_end|>"] stop_token_ids = [tokenizer.convert_tokens_to_ids(i) for i in stop_tokens] return ModelRequestData( engine_args=engine_args, prompt=prompt, stop_token_ids=stop_token_ids, image_data=[fetch_image(url) for url in image_urls], chat_template=chat_template, ) def load_qwen2_vl(question: str, image_urls: list[str]) -> ModelRequestData: try: from qwen_vl_utils import smart_resize except ModuleNotFoundError: print( "WARNING: `qwen-vl-utils` not installed, input images will not " "be automatically resized. You can enable this functionality by " "`pip install qwen-vl-utils`." 
) smart_resize = None model_name = "Qwen/Qwen2-VL-7B-Instruct" # Tested on L40 engine_args = EngineArgs( model=model_name, max_model_len=32768 if smart_resize is None else 4096, max_num_seqs=5, limit_mm_per_prompt={"image": len(image_urls)}, ) placeholders = [{"type": "image", "image": url} for url in image_urls] messages = [ {"role": "system", "content": "You are a helpful assistant."}, { "role": "user", "content": [ *placeholders, {"type": "text", "text": question}, ], }, ] processor = AutoProcessor.from_pretrained(model_name) prompt = processor.apply_chat_template( messages, tokenize=False, add_generation_prompt=True ) if smart_resize is None: image_data = [fetch_image(url) for url in image_urls] else: def post_process_image(image: Image) -> Image: width, height = image.size resized_height, resized_width = smart_resize( height, width, max_pixels=1024 * 28 * 28 ) return image.resize((resized_width, resized_height)) image_data = [post_process_image(fetch_image(url)) for url in image_urls] return ModelRequestData( engine_args=engine_args, prompt=prompt, image_data=image_data, ) def load_qwen2_5_vl(question: str, image_urls: list[str]) -> ModelRequestData: try: from qwen_vl_utils import smart_resize except ModuleNotFoundError: print(
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
true
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/torchrun_dp_example.py
examples/offline_inference/torchrun_dp_example.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Experimental data-parallel inference with torchrun.

Note: load balancing and distribution of the data happen outside the vllm
engine; no internal load balancing is supported in external_launcher mode.

To run this example:

```bash
$ torchrun --nproc-per-node=2 examples/offline_inference/torchrun_dp_example.py
```

With custom parallelism settings:

```bash
$ torchrun --nproc-per-node=8 examples/offline_inference/torchrun_dp_example.py \
    --tp-size=2 --pp-size=1 --dp-size=4 --enable-ep
```
"""

import argparse

from vllm import LLM, SamplingParams


def parse_args():
    """Parse the CLI flags controlling parallelism, model, and engine limits."""
    ap = argparse.ArgumentParser(description="Data-parallel inference with torchrun")
    ap.add_argument(
        "--tp-size",
        type=int,
        default=1,
        help="Tensor parallel size (default: 1)",
    )
    ap.add_argument(
        "--pp-size",
        type=int,
        default=1,
        help="Pipeline parallel size (default: 1)",
    )
    ap.add_argument(
        "--dp-size",
        type=int,
        default=2,
        help="Data parallel size (default: 2)",
    )
    ap.add_argument(
        "--enable-ep",
        action="store_true",
        help="Enable expert parallel (default: False)",
    )
    ap.add_argument(
        "--model",
        type=str,
        default="microsoft/Phi-mini-MoE-instruct",
        help="Model name or path (default: microsoft/Phi-mini-MoE-instruct)",
    )
    ap.add_argument(
        "--max-model-len",
        type=int,
        default=4096,
        help="Maximum model length (default: 4096)",
    )
    ap.add_argument(
        "--gpu-memory-utilization",
        type=float,
        default=0.6,
        help="GPU memory utilization (default: 0.6)",
    )
    ap.add_argument(
        "--seed",
        type=int,
        default=1,
        help="Random seed (default: 1)",
    )
    return ap.parse_args()


args = parse_args()

# The same prompt list on every rank; each rank later keeps only its share.
prompts = [
    "Hello, my name is",
    "The president of the United States is",
    "The capital of France is",
    "The future of AI is",
]

# Identical sampling parameters across all ranks.
sampling_params = SamplingParams(temperature=0.8, top_p=0.95)

# `distributed_executor_backend="external_launcher"` makes this llm
# engine/instance create only one worker. Setting an explicit seed is
# important: all ranks must share the same random seed so that sampling
# is deterministic across ranks.
llm = LLM(
    model=args.model,
    tensor_parallel_size=args.tp_size,
    data_parallel_size=args.dp_size,
    pipeline_parallel_size=args.pp_size,
    enable_expert_parallel=args.enable_ep,
    distributed_executor_backend="external_launcher",
    max_model_len=args.max_model_len,
    gpu_memory_utilization=args.gpu_memory_utilization,
    seed=args.seed,
)

parallel_config = llm.llm_engine.vllm_config.parallel_config
dp_rank = parallel_config.data_parallel_rank
dp_size = parallel_config.data_parallel_size

# Round-robin shard: rank r keeps the prompts whose index % dp_size == r,
# prefixing each with its original index.
my_prompts = []
for idx, prompt in enumerate(prompts):
    if idx % dp_size == dp_rank:
        my_prompts.append(f"{idx}.{prompt}")
prompts = my_prompts

outputs = llm.generate(prompts, sampling_params)

for output in outputs:
    prompt = output.prompt
    generated_text = output.outputs[0].text
    print(
        f"DP Rank: {dp_rank} Prompt: {prompt!r}\nGenerated text: {generated_text!r}\n"
    )

"""
Further tips:

1. to communicate control messages across all ranks, use the cpu group,
a PyTorch ProcessGroup with GLOO backend.

```python
from vllm.distributed.parallel_state import get_world_group
cpu_group = get_world_group().cpu_group
torch_rank = dist.get_rank(group=cpu_group)
if torch_rank == 0:
    # do something for rank 0, e.g. saving the results to disk.
```

2. to communicate data across all ranks, use the model's device group,
a PyTorch ProcessGroup with NCCL backend.

```python
from vllm.distributed.parallel_state import get_world_group
device_group = get_world_group().device_group
```

3. to access the model directly in every rank, use the following code:

```python
llm.llm_engine.model_executor.driver_worker.worker.model_runner.model
```
"""
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false